path: root/kernel/log.cc
blob: af8c422b80c3b21d14debb962bd722881039ec16 (plain)
/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2012  Claire Xenia Wolf <claire@yosyshq.com>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted, provided that the above
 *  copyright notice and this permission notice appear in all copies.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "kernel/yosys.h"
#include "libs/sha1/sha1.h"
#include "backends/rtlil/rtlil_backend.h"

#if !defined(_WIN32) || defined(__MINGW32__)
#  include <sys/time.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
#  include <dlfcn.h>
#endif

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <vector>
#include <list>

YOSYS_NAMESPACE_BEGIN

std::vector<FILE*> log_files;
std::vector<std::ostream*> log_streams;
std::map<std::string, std::set<std::string>> log_hdump;
std::vector<YS_REGEX_TYPE> log_warn_regexes, log_nowarn_regexes, log_werror_regexes;
dict<std::string, LogExpectedItem> log_expect_log, log_expect_warning, log_expect_error;
std::set<std::string> log_warnings, log_experimentals, log_experimentals_ignored;
int log_warnings_count = 0;
int log_warnings_count_noexpect = 0;
bool log_expect_no_warnings = false;
bool log_hdump_all = false;
FILE *log_errfile = NULL;
SHA1 *log_hasher = NULL;

bool log_time = false;
bool log_error_stderr = false;
bool log_cmd_error_throw = false;
bool log_quiet_warnings = false;
int log_verbose_level;
string log_last_error;
void (*log_error_atexit)() = NULL;

int log_make_debug = 0;
int log_force_debug = 0;
int log_debug_suppressed = 0;

vector<int> header_count;
vector<char*> log_id_cache;
vector<shared_str> string_buf;
int string_buf_index = -1;

static struct timeval initial_tv = { 0, 0 };
static bool next_print_log = false;
static int log_newline_count = 0;

static void log_id_cache_clear()
{
	for (auto p : log_id_cache)
		free(p);
	log_id_cache.clear();
}

#if defined(_WIN32) && !defined(__MINGW32__)
// Fill a struct timeval from QueryPerformanceCounter(), emulating gettimeofday() on Windows
int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	LARGE_INTEGER counter;
	LARGE_INTEGER freq;

	QueryPerformanceFrequency(&freq);
	QueryPerformanceCounter(&counter);

	counter.QuadPart *= 1000000;
	counter.QuadPart /= freq.QuadPart;

	tv->tv_sec = long(counter.QuadPart / 1000000);
	tv->tv_usec = counter.QuadPart % 1000000;

	return 0;
}
#endif

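// Core formatted logging routine. All log()/log_warning()/log_error() variants
// eventually funnel through here. The formatted message is written to every
// registered FILE* and std::ostream, optionally prefixed with a timestamp when
// log_time is set, fed to log_hasher if one is installed, and buffered
// line-by-line so that complete lines can be matched against the -W warning
// regexes and the expected-log patterns.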
void logv(const char *format, va_list ap)
{
	while (format[0] == '\n' && format[1] != 0) {
		log("\n");
		format++;
	}

	if (log_make_debug && !ys_debug(1))
		return;

	std::string str = vstringf(format, ap);

	if (str.empty())
		return;

	size_t nnl_pos = str.find_last_not_of('\n');
	if (nnl_pos == std::string::npos)
		log_newline_count += GetSize(str);
	else
		log_newline_count = GetSize(str) - nnl_pos - 1;

	if (log_hasher)
		log_hasher->update(str);

	if (log_time)
	{
		std::string time_str;

		if (next_print_log || initial_tv.tv_sec == 0) {
			next_print_log = false;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			if (initial_tv.tv_sec == 0)
				initial_tv = tv;
			if (tv.tv_usec < initial_tv.tv_usec) {
				tv.tv_sec--;
				tv.tv_usec += 1000000;
			}
			tv.tv_sec -= initial_tv.tv_sec;
			tv.tv_usec -= initial_tv.tv_usec;
			time_str += stringf("[%05d.%06d] ", int(tv.tv_sec), int(tv.tv_usec));
		}

		if (format[0] && format[strlen(format)-1] == '\n')
			next_print_log = true;

		for (auto f : log_files)
			fputs(time_str.c_str(), f);

		for (auto f : log_streams)
			*f << time_str;
	}

	for (auto f : log_files)
		fputs(str.c_str(), f);

	for (auto f : log_streams)
		*f << str;

	static std::string linebuffer;
	static bool log_warn_regex_recursion_guard = false;

	if (!log_warn_regex_recursion_guard)
	{
		log_warn_regex_recursion_guard = true;

		if (log_warn_regexes.empty() && log_expect_log.empty())
		{
			linebuffer.clear();
		}
		else
		{
			linebuffer += str;

			if (!linebuffer.empty() && linebuffer.back() == '\n') {
				for (auto &re : log_warn_regexes)
					if (YS_REGEX_NS::regex_search(linebuffer, re))
						log_warning("Found log message matching -W regex:\n%s", str.c_str());

				for (auto &item : log_expect_log)
					if (YS_REGEX_NS::regex_search(linebuffer, item.second.pattern))
						item.second.current_count++;

				linebuffer.clear();
			}
		}

		log_warn_regex_recursion_guard = false;
	}
}

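// Print a numbered section header (e.g. "3.2. Executing ..."). The header
// counter stack is maintained by log_push()/log_pop(). Headers up to the
// configured verbosity level are mirrored to log_errfile, and if a design dump
// was requested for this header id (log_hdump), the current design is written
// out via the "dump" pass.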
void logv_header(RTLIL::Design *design, const char *format, va_list ap)
{
	bool pop_errfile = false;

	log_spacer();
	if (header_count.size() > 0)
		header_count.back()++;

	if (int(header_count.size()) <= log_verbose_level && log_errfile != NULL) {
		log_files.push_back(log_errfile);
		pop_errfile = true;
	}

	std::string header_id;

	for (int c : header_count)
		header_id += stringf("%s%d", header_id.empty() ? "" : ".", c);

	log("%s. ", header_id.c_str());
	logv(format, ap);
	log_flush();

	if (log_hdump_all)
		log_hdump[header_id].insert("yosys_dump_" + header_id + ".il");

	if (log_hdump.count(header_id) && design != nullptr)
		for (auto &filename : log_hdump.at(header_id)) {
			log("Dumping current design to '%s'.\n", filename.c_str());
			if (yosys_xtrace)
				IdString::xtrace_db_dump();
			Pass::call(design, {"dump", "-o", filename});
			if (yosys_xtrace)
				log("#X# -- end of dump --\n");
		}

	if (pop_errfile)
		log_files.pop_back();
}

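// Shared implementation of the log_warning() family. Messages matching a
// log_nowarn_regexes entry are only reported as "Suppressed"; messages matching
// a log_werror_regexes entry are promoted to hard errors via log_error(). The
// first occurrence of a given warning text is additionally mirrored to
// log_errfile (unless quiet warnings are enabled); repeats go to the regular
// outputs only. Matches against expected-warning patterns are tallied for
// log_check_expected().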
static void logv_warning_with_prefix(const char *prefix,
                                     const char *format, va_list ap)
{
	std::string message = vstringf(format, ap);
	bool suppressed = false;

	for (auto &re : log_nowarn_regexes)
		if (YS_REGEX_NS::regex_search(message, re))
			suppressed = true;

	if (suppressed)
	{
		log("Suppressed %s%s", prefix, message.c_str());
	}
	else
	{
		int bak_log_make_debug = log_make_debug;
		log_make_debug = 0;

		for (auto &re : log_werror_regexes)
			if (YS_REGEX_NS::regex_search(message, re))
				log_error("%s",  message.c_str());

		bool warning_match = false;
		for (auto &item : log_expect_warning)
			if (YS_REGEX_NS::regex_search(message, item.second.pattern)) {
				item.second.current_count++;
				warning_match = true;
			}

		if (log_warnings.count(message))
		{
			log("%s%s", prefix, message.c_str());
			log_flush();
		}
		else
		{
			if (log_errfile != NULL && !log_quiet_warnings)
				log_files.push_back(log_errfile);

			log("%s%s", prefix, message.c_str());
			log_flush();

			if (log_errfile != NULL && !log_quiet_warnings)
				log_files.pop_back();

			log_warnings.insert(message);
		}

		if (!warning_match)
			log_warnings_count_noexpect++;
		log_warnings_count++;
		log_make_debug = bak_log_make_debug;
	}
}

void logv_warning(const char *format, va_list ap)
{
	logv_warning_with_prefix("Warning: ", format, ap);
}

void logv_warning_noprefix(const char *format, va_list ap)
{
	logv_warning_with_prefix("", format, ap);
}

void log_file_warning(const std::string &filename, int lineno,
                      const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	std::string prefix = stringf("%s:%d: Warning: ",
			filename.c_str(), lineno);
	logv_warning_with_prefix(prefix.c_str(), format, ap);
	va_end(ap);
}

void log_file_info(const std::string &filename, int lineno,
                      const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	std::string fmt = stringf("%s:%d: Info: %s",
			filename.c_str(), lineno, format);
	logv(fmt.c_str(), ap);
	va_end(ap);
}

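// Shared implementation of log_error()/log_file_error(). Prints the message
// (optionally redirected to stderr), records it in log_last_error, tallies
// expected-error patterns, runs the log_error_atexit hook and then terminates
// the process -- except under Emscripten, where an exception is thrown instead
// so the caller can recover.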
[[noreturn]]
static void logv_error_with_prefix(const char *prefix,
                                   const char *format, va_list ap)
{
#ifdef EMSCRIPTEN
	auto backup_log_files = log_files;
#endif
	int bak_log_make_debug = log_make_debug;
	log_make_debug = 0;
	log_suppressed();

	if (log_errfile != NULL)
		log_files.push_back(log_errfile);

	if (log_error_stderr)
		for (auto &f : log_files)
			if (f == stdout)
				f = stderr;

	log_last_error = vstringf(format, ap);
	log("%s%s", prefix, log_last_error.c_str());
	log_flush();

	log_make_debug = bak_log_make_debug;

	for (auto &item : log_expect_error)
		if (YS_REGEX_NS::regex_search(log_last_error, item.second.pattern))
			item.second.current_count++;

	log_check_expected();

	if (log_error_atexit)
		log_error_atexit();

	YS_DEBUGTRAP_IF_DEBUGGING;
	const char *e = getenv("YOSYS_ABORT_ON_LOG_ERROR");
	if (e && atoi(e))
		abort();

#ifdef EMSCRIPTEN
	log_files = backup_log_files;
	throw 0;
#elif defined(_MSC_VER)
	_exit(1);
#else
	_Exit(1);
#endif
}

void logv_error(const char *format, va_list ap)
{
	logv_error_with_prefix("ERROR: ", format, ap);
}

void log_file_error(const string &filename, int lineno,
                    const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	std::string prefix = stringf("%s:%d: ERROR: ",
				     filename.c_str(), lineno);
	logv_error_with_prefix(prefix.c_str(), format, ap);
}

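// Public varargs front-ends for the logv_*() implementations above.
// Typical usage from inside a pass looks like, e.g.:
//
//   log("Processing module %s.\n", log_id(module->name));
//   log_warning("Unusual parameter on cell %s.\n", log_id(cell->name));
//   log_error("Cannot handle cell type %s.\n", log_id(cell->type));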
void log(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	logv(format, ap);
	va_end(ap);
}

void log_header(RTLIL::Design *design, const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	logv_header(design, format, ap);
	va_end(ap);
}

void log_warning(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	logv_warning(format, ap);
	va_end(ap);
}

void log_experimental(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	string s = vstringf(format, ap);
	va_end(ap);

	if (log_experimentals_ignored.count(s) == 0 && log_experimentals.count(s) == 0) {
		log_warning("Feature '%s' is experimental.\n", s.c_str());
		log_experimentals.insert(s);
	}
}

void log_warning_noprefix(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	logv_warning_noprefix(format, ap);
	va_end(ap);
}

void log_error(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	logv_error(format, ap);
}

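// Report a command usage error. When log_cmd_error_throw is set (e.g. in the
// interactive shell), a log_cmd_error_exception is thrown so the session
// survives; otherwise this behaves like log_error() and terminates.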
void log_cmd_error(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);

	if (log_cmd_error_throw) {
		log_last_error = vstringf(format, ap);
		log("ERROR: %s", log_last_error.c_str());
		log_flush();
		throw log_cmd_error_exception();
	}

	logv_error(format, ap);
}

void log_spacer()
{
	if (log_newline_count < 2) log("\n");
	if (log_newline_count < 2) log("\n");
}

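// log_push()/log_pop() bracket a nested pass invocation: they maintain the
// header numbering stack used by log_header() and release the temporary
// string buffers handed out by log_id()/log_signal().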
void log_push()
{
	header_count.push_back(0);
}

void log_pop()
{
	header_count.pop_back();
	log_id_cache_clear();
	string_buf.clear();
	string_buf_index = -1;
	log_flush();
}

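// Best-effort backtrace logging for plugin debugging: resolve up to `levels`
// return addresses via dladdr() and print them. Only available on Linux/FreeBSD
// builds with plugin support; anything deeper than one frame requires a build
// with ENABLE_DEBUG. On other platforms this is a no-op stub.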
#if (defined(__linux__) || defined(__FreeBSD__)) && defined(YOSYS_ENABLE_PLUGINS)
void log_backtrace(const char *prefix, int levels)
{
	if (levels <= 0) return;

	Dl_info dli;
	void *p;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(0))) && dladdr(p, &dli)) {
		log("%sframe #1: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #1: ---\n", prefix);
		return;
	}

	if (levels <= 1) return;

#ifndef DEBUG
	log("%sframe #2: [build Yosys with ENABLE_DEBUG for deeper backtraces]\n", prefix);
#else
	if ((p = __builtin_extract_return_addr(__builtin_return_address(1))) && dladdr(p, &dli)) {
		log("%sframe #2: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #2: ---\n", prefix);
		return;
	}

	if (levels <= 2) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(2))) && dladdr(p, &dli)) {
		log("%sframe #3: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #3: ---\n", prefix);
		return;
	}

	if (levels <= 3) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(3))) && dladdr(p, &dli)) {
		log("%sframe #4: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #4: ---\n", prefix);
		return;
	}

	if (levels <= 4) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(4))) && dladdr(p, &dli)) {
		log("%sframe #5: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #5: ---\n", prefix);
		return;
	}

	if (levels <= 5) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(5))) && dladdr(p, &dli)) {
		log("%sframe #6: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #6: ---\n", prefix);
		return;
	}

	if (levels <= 6) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(6))) && dladdr(p, &dli)) {
		log("%sframe #7: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #7: ---\n", prefix);
		return;
	}

	if (levels <= 7) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(7))) && dladdr(p, &dli)) {
		log("%sframe #8: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #8: ---\n", prefix);
		return;
	}

	if (levels <= 8) return;

	if ((p = __builtin_extract_return_addr(__builtin_return_address(8))) && dladdr(p, &dli)) {
		log("%sframe #9: %p %s(%p) %s(%p)\n", prefix, p, dli.dli_fname, dli.dli_fbase, dli.dli_sname, dli.dli_saddr);
	} else {
		log("%sframe #9: ---\n", prefix);
		return;
	}

	if (levels <= 9) return;
#endif
}
#else
void log_backtrace(const char*, int) { }
#endif

void log_reset_stack()
{
	while (header_count.size() > 1)
		header_count.pop_back();
	log_id_cache_clear();
	string_buf.clear();
	string_buf_index = -1;
	log_flush();
}

void log_flush()
{
	for (auto f : log_files)
		fflush(f);

	for (auto f : log_streams)
		f->flush();
}

void log_dump_val_worker(RTLIL::IdString v) {
	log("%s", log_id(v));
}

void log_dump_val_worker(RTLIL::SigSpec v) {
	log("%s", log_signal(v));
}

void log_dump_val_worker(RTLIL::State v) {
	log("%s", log_signal(v));
}

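// Render a SigSpec as RTLIL source text. The returned pointer stays valid until
// roughly 100 further log_signal()/log_const() calls: results are kept in a
// small ring buffer (string_buf) that is recycled once full and cleared by
// log_pop()/log_reset_stack().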
const char *log_signal(const RTLIL::SigSpec &sig, bool autoint)
{
	std::stringstream buf;
	RTLIL_BACKEND::dump_sigspec(buf, sig, autoint);

	if (string_buf.size() < 100) {
		string_buf.push_back(buf.str());
		return string_buf.back().c_str();
	} else {
		if (++string_buf_index == 100)
			string_buf_index = 0;
		string_buf[string_buf_index] = buf.str();
		return string_buf[string_buf_index].c_str();
	}
}

const char *log_const(const RTLIL::Const &value, bool autoint)
{
	if ((value.flags & RTLIL::CONST_FLAG_STRING) == 0)
		return log_signal(value, autoint);

	std::string str = "\"" + value.decode_string() + "\"";

	if (string_buf.size() < 100) {
		string_buf.push_back(str);
		return string_buf.back().c_str();
	} else {
		if (++string_buf_index == 100)
			string_buf_index = 0;
		string_buf[string_buf_index] = str;
		return string_buf[string_buf_index].c_str();
	}
}

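// Return a printable C string for an IdString. Public names have their leading
// backslash stripped unless the remainder could be confused with an internal
// or numeric name; the strdup'd copies live in log_id_cache until the next
// log_pop()/log_reset_stack().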
const char *log_id(const RTLIL::IdString &str)
{
	log_id_cache.push_back(strdup(str.c_str()));
	const char *p = log_id_cache.back();
	if (p[0] != '\\')
		return p;
	if (p[1] == '$' || p[1] == '\\' || p[1] == 0)
		return p;
	if (p[1] >= '0' && p[1] <= '9')
		return p;
	return p+1;
}

void log_module(RTLIL::Module *module, std::string indent)
{
	std::stringstream buf;
	RTLIL_BACKEND::dump_module(buf, indent, module, module->design, false);
	log("%s", buf.str().c_str());
}

void log_cell(RTLIL::Cell *cell, std::string indent)
{
	std::stringstream buf;
	RTLIL_BACKEND::dump_cell(buf, indent, cell);
	log("%s", buf.str().c_str());
}

void log_wire(RTLIL::Wire *wire, std::string indent)
{
	std::stringstream buf;
	RTLIL_BACKEND::dump_wire(buf, indent, wire);
	log("%s", buf.str().c_str());
}

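// Verify the expected-message bookkeeping (typically set up via the
// `logger -expect` command): every expected log/warning pattern must have
// matched exactly its expected number of times, and an expected error pattern
// must actually have occurred. When an expected error did occur, the process
// exits with status 0, since the error itself would otherwise terminate with
// a failure code.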
void log_check_expected()
{
	// Move the expected-pattern tables into locals so that the error messages
	// emitted below cannot be re-checked or matched against themselves.
	dict<std::string, LogExpectedItem> expect_log, expect_warning, expect_error;
	std::swap(expect_warning, log_expect_warning);
	std::swap(expect_log, log_expect_log);
	std::swap(expect_error, log_expect_error);

	for (auto &item : expect_warning) {
		if (item.second.current_count == 0) {
			log_warn_regexes.clear();
			log_error("Expected warning pattern '%s' not found !\n", item.first.c_str());
		}
		if (item.second.current_count != item.second.expected_count) {
			log_warn_regexes.clear();
			log_error("Expected warning pattern '%s' found %d time(s), instead of %d time(s) !\n",
				item.first.c_str(), item.second.current_count, item.second.expected_count);
		}
	}

	for (auto &item : expect_log) {
		if (item.second.current_count == 0) {
			log_warn_regexes.clear();
			log_error("Expected log pattern '%s' not found !\n", item.first.c_str());
		}
		if (item.second.current_count != item.second.expected_count) {
			log_warn_regexes.clear();
			log_error("Expected log pattern '%s' found %d time(s), instead of %d time(s) !\n",
				item.first.c_str(), item.second.current_count, item.second.expected_count);
		}
	}

	for (auto &item : expect_error)
		if (item.second.current_count == item.second.expected_count) {
			log_warn_regexes.clear();
			log("Expected error pattern '%s' found !!!\n", item.first.c_str());
			#ifdef EMSCRIPTEN
				throw 0;
			#elif defined(_MSC_VER)
				_exit(0);
			#else
				_Exit(0);
			#endif
		} else {
			log_warn_regexes.clear();
			log_error("Expected error pattern '%s' not found !\n", item.first.c_str());
		}
}

// ---------------------------------------------------
// This is the magic behind the code coverage counters
// ---------------------------------------------------
#if defined(YOSYS_ENABLE_COVER) && (defined(__linux__) || defined(__FreeBSD__))

dict<std::string, std::pair<std::string, int>> extra_coverage_data;

void cover_extra(std::string parent, std::string id, bool increment) {
	if (extra_coverage_data.count(id) == 0) {
		for (CoverData *p = __start_yosys_cover_list; p != __stop_yosys_cover_list; p++)
			if (p->id == parent)
				extra_coverage_data[id].first = stringf("%s:%d:%s", p->file, p->line, p->func);
		log_assert(extra_coverage_data.count(id));
	}
	if (increment)
		extra_coverage_data[id].second++;
}

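// Collect coverage counters from three sources: per-pass call counters from the
// pass registry, ad-hoc counters registered through cover_extra(), and the
// static yosys_cover_list section filled in by the cover() macros. Source
// locations are reported relative to YOSYS_SRC.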
dict<std::string, std::pair<std::string, int>> get_coverage_data()
{
	dict<std::string, std::pair<std::string, int>> coverage_data;

	for (auto &it : pass_register) {
		std::string key = stringf("passes.%s", it.first.c_str());
		coverage_data[key].first = stringf("%s:%d:%s", __FILE__, __LINE__, __FUNCTION__);
		coverage_data[key].second += it.second->call_counter;
	}

	for (auto &it : extra_coverage_data) {
		if (coverage_data.count(it.first))
			log_warning("found duplicate coverage id \"%s\".\n", it.first.c_str());
		coverage_data[it.first].first = it.second.first;
		coverage_data[it.first].second += it.second.second;
	}

	for (CoverData *p = __start_yosys_cover_list; p != __stop_yosys_cover_list; p++) {
		if (coverage_data.count(p->id))
			log_warning("found duplicate coverage id \"%s\".\n", p->id);
		coverage_data[p->id].first = stringf("%s:%d:%s", p->file, p->line, p->func);
		coverage_data[p->id].second += p->counter;
	}

	for (auto &it : coverage_data)
		if (!it.second.first.compare(0, strlen(YOSYS_SRC "/"), YOSYS_SRC "/"))
			it.second.first = it.second.first.substr(strlen(YOSYS_SRC "/"));

	return coverage_data;
}

#endif

YOSYS_NAMESPACE_END
="o">= mp_irqs[idx].mpc_srcbus; int trigger; /* * Determine IRQ trigger mode (edge or level sensitive): */ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) { case 0: /* conforms, ie. bus-type dependent */ { switch (mp_bus_id_to_type[bus]) { case MP_BUS_ISA: /* ISA pin */ { trigger = default_ISA_trigger(idx); break; } case MP_BUS_EISA: /* EISA pin */ { trigger = default_EISA_trigger(idx); break; } case MP_BUS_PCI: /* PCI pin */ { trigger = default_PCI_trigger(idx); break; } case MP_BUS_MCA: /* MCA pin */ { trigger = default_MCA_trigger(idx); break; } case MP_BUS_NEC98: /* NEC 98 pin */ { trigger = default_NEC98_trigger(idx); break; } default: { printk(KERN_WARNING "broken BIOS!!\n"); trigger = 1; break; } } break; } case 1: /* edge */ { trigger = 0; break; } case 2: /* reserved */ { printk(KERN_WARNING "broken BIOS!!\n"); trigger = 1; break; } case 3: /* level */ { trigger = 1; break; } default: /* invalid */ { printk(KERN_WARNING "broken BIOS!!\n"); trigger = 0; break; } } return trigger; } static inline int irq_polarity(int idx) { return MPBIOS_polarity(idx); } static inline int irq_trigger(int idx) { return MPBIOS_trigger(idx); } static int pin_2_irq(int idx, int apic, int pin) { int irq, i; int bus = mp_irqs[idx].mpc_srcbus; /* * Debugging check, we are in big trouble if this message pops up! */ if (mp_irqs[idx].mpc_dstirq != pin) printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); switch (mp_bus_id_to_type[bus]) { case MP_BUS_ISA: /* ISA pin */ case MP_BUS_EISA: case MP_BUS_MCA: case MP_BUS_NEC98: { irq = mp_irqs[idx].mpc_srcbusirq; break; } case MP_BUS_PCI: /* PCI pin */ { /* * PCI IRQs are mapped in order */ i = irq = 0; while (i < apic) irq += nr_ioapic_registers[i++]; irq += pin; /* * For MPS mode, so far only needed by ES7000 platform */ if (ioapic_renumber_irq) irq = ioapic_renumber_irq(apic, irq); break; } default: { printk(KERN_ERR "unknown bus type %d.\n",bus); irq = 0; break; } } return irq; } static inline int IO_APIC_irq_trigger(int irq) { int apic, idx, pin; for (apic = 0; apic < nr_ioapics; apic++) { for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { idx = find_irq_entry(apic,pin,mp_INT); if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) return irq_trigger(idx); } } /* * nonexistent IRQs are edge default */ return 0; } /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; int assign_irq_vector(int irq) { static unsigned current_vector = FIRST_DYNAMIC_VECTOR, offset = 0; unsigned vector; BUG_ON(irq >= NR_IRQ_VECTORS); spin_lock(&vector_lock); if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { spin_unlock(&vector_lock); return IO_APIC_VECTOR(irq); } next: current_vector += 8; /* Skip the hypercall vector. */ if (current_vector == HYPERCALL_VECTOR) goto next; /* Skip the Linux/BSD fast-trap vector. 
*/ if (current_vector == 0x80) goto next; if (current_vector > LAST_DYNAMIC_VECTOR) { offset++; if (!(offset%8)) { spin_unlock(&vector_lock); return -ENOSPC; } current_vector = FIRST_DYNAMIC_VECTOR + offset; } vector = current_vector; vector_irq[vector] = irq; if (irq != AUTO_ASSIGN) IO_APIC_VECTOR(irq) = vector; spin_unlock(&vector_lock); return vector; } static struct hw_interrupt_type ioapic_level_type; static struct hw_interrupt_type ioapic_edge_type; #define IOAPIC_AUTO -1 #define IOAPIC_EDGE 0 #define IOAPIC_LEVEL 1 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) { if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) irq_desc[vector].handler = &ioapic_level_type; else irq_desc[vector].handler = &ioapic_edge_type; } static void __init setup_IO_APIC_irqs(void) { struct IO_APIC_route_entry entry; int apic, pin, idx, irq, first_notcon = 1, vector; unsigned long flags; apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); for (apic = 0; apic < nr_ioapics; apic++) { for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { /* * add it to the IO-APIC irq-routing table: */ memset(&entry,0,sizeof(entry)); entry.delivery_mode = INT_DELIVERY_MODE; entry.dest_mode = INT_DEST_MODE; entry.mask = 0; /* enable IRQ */ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); idx = find_irq_entry(apic,pin,mp_INT); if (idx == -1) { if (first_notcon) { apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin); first_notcon = 0; } else apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin); continue; } entry.trigger = irq_trigger(idx); entry.polarity = irq_polarity(idx); if (irq_trigger(idx)) { entry.trigger = 1; entry.mask = 1; } irq = pin_2_irq(idx, apic, pin); /* * skip adding the timer int on secondary nodes, which causes * a small but painful rift in the time-space continuum */ if (multi_timer_check(apic, irq)) continue; else add_pin_to_irq(irq, apic, pin); if (!apic && !IO_APIC_IRQ(irq)) continue; if (IO_APIC_IRQ(irq)) { vector = assign_irq_vector(irq); entry.vector = vector; ioapic_register_intr(irq, vector, IOAPIC_AUTO); if (!apic && (irq < 16)) disable_8259A_irq(irq); } spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); set_native_irq_info(entry.vector, TARGET_CPUS); spin_unlock_irqrestore(&ioapic_lock, flags); } } if (!first_notcon) apic_printk(APIC_VERBOSE, " not connected.\n"); } /* * Set up the 8259A-master output pin: */ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) { struct IO_APIC_route_entry entry; unsigned long flags; memset(&entry,0,sizeof(entry)); disable_8259A_irq(0); /* mask LVT0 */ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); /* * We use logical delivery to get the timer IRQ * to the first CPU. */ entry.dest_mode = INT_DEST_MODE; entry.mask = 0; /* unmask IRQ now */ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); entry.delivery_mode = INT_DELIVERY_MODE; entry.polarity = 0; entry.trigger = 0; entry.vector = vector; /* * The timer IRQ doesn't have to know that behind the * scene we have a 8259A-master in AEOI mode ... 
*/ irq_desc[IO_APIC_VECTOR(0)].handler = &ioapic_edge_type; /* * Add it to the IO-APIC irq-routing table: */ spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); spin_unlock_irqrestore(&ioapic_lock, flags); enable_8259A_irq(0); } static inline void UNEXPECTED_IO_APIC(void) { } void __init __print_IO_APIC(void) { int apic, i; union IO_APIC_reg_00 reg_00; union IO_APIC_reg_01 reg_01; union IO_APIC_reg_02 reg_02; union IO_APIC_reg_03 reg_03; unsigned long flags; printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); for (i = 0; i < nr_ioapics; i++) printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]); /* * We are a bit conservative about what we expect. We have to * know about every hardware change ASAP. */ printk(KERN_INFO "testing the IO APIC.......................\n"); for (apic = 0; apic < nr_ioapics; apic++) { spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); reg_01.raw = io_apic_read(apic, 1); if (reg_01.bits.version >= 0x10) reg_02.raw = io_apic_read(apic, 2); if (reg_01.bits.version >= 0x20) reg_03.raw = io_apic_read(apic, 3); spin_unlock_irqrestore(&ioapic_lock, flags); printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid); printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); if (reg_00.bits.ID >= get_physical_broadcast()) UNEXPECTED_IO_APIC(); if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2) UNEXPECTED_IO_APIC(); printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */ (reg_01.bits.entries != 0x2E) && (reg_01.bits.entries != 0x3F) ) UNEXPECTED_IO_APIC(); printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */ ) UNEXPECTED_IO_APIC(); if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2) UNEXPECTED_IO_APIC(); /* * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, * but the value of reg_02 is read as the previous read register * value, so ignore it if reg_02 == reg_01. */ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2) UNEXPECTED_IO_APIC(); } /* * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 * or reg_03, but the value of reg_0[23] is read as the previous read * register value, so ignore it if reg_03 == reg_0[12]. 
*/ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && reg_03.raw != reg_01.raw) { printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); if (reg_03.bits.__reserved_1) UNEXPECTED_IO_APIC(); } printk(KERN_DEBUG ".... IRQ redirection table:\n"); printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" " Stat Dest Deli Vect: \n"); for (i = 0; i <= reg_01.bits.entries; i++) { struct IO_APIC_route_entry entry; spin_lock_irqsave(&ioapic_lock, flags); *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2); *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2); spin_unlock_irqrestore(&ioapic_lock, flags); printk(KERN_DEBUG " %02x %03X %02X ", i, entry.dest.logical.logical_dest, entry.dest.physical.physical_dest ); printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", entry.mask, entry.trigger, entry.irr, entry.polarity, entry.delivery_status, entry.dest_mode, entry.delivery_mode, entry.vector ); } } printk(KERN_INFO "Using vector-based indexing\n"); printk(KERN_DEBUG "IRQ to pin mappings:\n"); for (i = 0; i < NR_IRQS; i++) { struct irq_pin_list *entry = irq_2_pin + i; if (entry->pin < 0) continue; printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i)); for (;;) { printk("-> %d:%d", entry->apic, entry->pin); if (!entry->next) break; entry = irq_2_pin + entry->next; } printk("\n"); } printk(KERN_INFO ".................................... done.\n"); return; } void print_IO_APIC(void) { if (apic_verbosity != APIC_QUIET) __print_IO_APIC(); } void print_IO_APIC_keyhandler(unsigned char key) { __print_IO_APIC(); } static void __init enable_IO_APIC(void) { union IO_APIC_reg_01 reg_01; int i8259_apic, i8259_pin; int i, apic; unsigned long flags; for (i = 0; i < PIN_MAP_SIZE; i++) { irq_2_pin[i].pin = -1; irq_2_pin[i].next = 0; } /* Initialise dynamic irq_2_pin free list. */ for (i = NR_IRQS; i < PIN_MAP_SIZE; i++) irq_2_pin[i].next = i + 1; /* * The number of IO-APIC IRQ registers (== #pins): */ for (apic = 0; apic < nr_ioapics; apic++) { spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(apic, 1); spin_unlock_irqrestore(&ioapic_lock, flags); nr_ioapic_registers[apic] = reg_01.bits.entries+1; } for(apic = 0; apic < nr_ioapics; apic++) { int pin; /* See if any of the pins is in ExtINT mode */ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { struct IO_APIC_route_entry entry; spin_lock_irqsave(&ioapic_lock, flags); *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); spin_unlock_irqrestore(&ioapic_lock, flags); /* If the interrupt line is enabled and in ExtInt mode * I have found the pin where the i8259 is connected. */ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { ioapic_i8259.apic = apic; ioapic_i8259.pin = pin; goto found_i8259; } } } found_i8259: /* Look to see what if the MP table has reported the ExtINT */ /* If we could not find the appropriate pin by looking at the ioapic * the i8259 probably is not connected the ioapic but give the * mptable a chance anyway. 
*/ i8259_pin = find_isa_irq_pin(0, mp_ExtINT); i8259_apic = find_isa_irq_apic(0, mp_ExtINT); /* Trust the MP table if nothing is setup in the hardware */ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); ioapic_i8259.pin = i8259_pin; ioapic_i8259.apic = i8259_apic; } /* Complain if the MP table and the hardware disagree */ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) { printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); } /* * Do not trust the IO-APIC being empty at bootup */ clear_IO_APIC(); } /* * Not an __init, needed by the reboot code */ void disable_IO_APIC(void) { /* * Clear the IO-APIC before rebooting: */ clear_IO_APIC(); /* * If the i8259 is routed through an IOAPIC * Put that IOAPIC in virtual wire mode * so legacy interrupts can be delivered. */ if (ioapic_i8259.pin != -1) { struct IO_APIC_route_entry entry; unsigned long flags; memset(&entry, 0, sizeof(entry)); entry.mask = 0; /* Enabled */ entry.trigger = 0; /* Edge */ entry.irr = 0; entry.polarity = 0; /* High */ entry.delivery_status = 0; entry.dest_mode = 0; /* Physical */ entry.delivery_mode = dest_ExtINT; /* ExtInt */ entry.vector = 0; entry.dest.physical.physical_dest = GET_APIC_ID(apic_read(APIC_ID)); /* * Add it to the IO-APIC irq-routing table: */ spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin, *(((int *)&entry)+1)); io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin, *(((int *)&entry)+0)); spin_unlock_irqrestore(&ioapic_lock, flags); } disconnect_bsp_APIC(ioapic_i8259.pin != -1); } /* * function to set the IO-APIC physical IDs based on the * values stored in the MPC table. * * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 */ #ifndef CONFIG_X86_NUMAQ static void __init setup_ioapic_ids_from_mpc(void) { union IO_APIC_reg_00 reg_00; physid_mask_t phys_id_present_map; int apic; int i; unsigned char old_id; unsigned long flags; /* * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. */ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) return; /* * This is broken; anything with a real cpu count has to * circumvent this idiocy regardless. */ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); /* * Set the IOAPIC ID to the value stored in the MPC table. */ for (apic = 0; apic < nr_ioapics; apic++) { /* Read the register 0 value */ spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); old_id = mp_ioapics[apic].mpc_apicid; if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", apic, mp_ioapics[apic].mpc_apicid); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", reg_00.bits.ID); mp_ioapics[apic].mpc_apicid = reg_00.bits.ID; } /* * Sanity check, is the ID really free? Every APIC in a * system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ if (check_apicid_used(phys_id_present_map, mp_ioapics[apic].mpc_apicid)) { printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", apic, mp_ioapics[apic].mpc_apicid); for (i = 0; i < get_physical_broadcast(); i++) if (!physid_isset(i, phys_id_present_map)) break; if (i >= get_physical_broadcast()) panic("Max APIC ID exceeded!\n"); printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", i); physid_set(i, phys_id_present_map); mp_ioapics[apic].mpc_apicid = i; } else { physid_mask_t tmp; tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid); apic_printk(APIC_VERBOSE, "Setting %d in the " "phys_id_present_map\n", mp_ioapics[apic].mpc_apicid); physids_or(phys_id_present_map, phys_id_present_map, tmp); } /* * We need to adjust the IRQ routing table * if the ID changed. */ if (old_id != mp_ioapics[apic].mpc_apicid) for (i = 0; i < mp_irq_entries; i++) if (mp_irqs[i].mpc_dstapic == old_id) mp_irqs[i].mpc_dstapic = mp_ioapics[apic].mpc_apicid; /* * Read the right value from the MPC table and * write it into the ID register. */ apic_printk(APIC_VERBOSE, KERN_INFO "...changing IO-APIC physical APIC ID to %d ...", mp_ioapics[apic].mpc_apicid); reg_00.bits.ID = mp_ioapics[apic].mpc_apicid; spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0, reg_00.raw); spin_unlock_irqrestore(&ioapic_lock, flags); /* * Sanity check */ spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); } } #else static void __init setup_ioapic_ids_from_mpc(void) { } #endif /* * There is a nasty bug in some older SMP boards, their mptable lies * about the timer IRQ. We do the following to work around the situation: * * - timer IRQ defaults to IO-APIC IRQ * - if this function detects that timer IRQs are defunct, then we fall * back to ISA timer IRQs */ static int __init timer_irq_works(void) { unsigned long t1 = jiffies; local_irq_enable(); /* Let ten ticks pass... */ mdelay((10 * 1000) / HZ); /* * Expect a few ticks at least, to be sure some possible * glue logic does not lock up after one or two first * ticks in a non-ExtINT mode. Also the local APIC * might have cached one ExtINT interrupt. Finally, at * least one tick may be lost due to delays. */ if (jiffies - t1 > 4) return 1; return 0; } /* * In the SMP+IOAPIC case it might happen that there are an unspecified * number of pending IRQ events unhandled. These cases are very rare, * so we 'resend' these IRQs via IPIs, to the same CPU. It's much * better to do it this way as thus we do not have to be aware of * 'pending' interrupts in the IRQ path, except at this point. */ /* * Edge triggered needs to resend any interrupt * that was delayed but this is now handled in the device * independent code. */ /* * Starting up a edge-triggered IO-APIC interrupt is * nasty - we need to make sure that we get the edge. * If it is already asserted for some reason, we need * return 1 to indicate that is was pending. * * This is not complete - we should be able to fake * an edge even if it isn't on the 8259A... 
*/ static unsigned int startup_edge_ioapic_irq(unsigned int irq) { int was_pending = 0; unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); if (irq < 16) { disable_8259A_irq(irq); if (i8259A_irq_pending(irq)) was_pending = 1; } __unmask_IO_APIC_irq(irq); spin_unlock_irqrestore(&ioapic_lock, flags); return was_pending; } /* * Once we have recorded IRQ_PENDING already, we can mask the * interrupt for real. This prevents IRQ storms from unhandled * devices. */ static void ack_edge_ioapic_irq(unsigned int irq) { if ((irq_desc[IO_APIC_VECTOR(irq)].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED)) mask_IO_APIC_irq(irq); ack_APIC_irq(); } /* * Level triggered interrupts can just be masked, * and shutting down and starting up the interrupt * is the same as enabling and disabling them -- except * with a startup need to return a "was pending" value. * * Level triggered interrupts are special because we * do not touch any IO-APIC register while handling * them. We ack the APIC in the end-IRQ handler, not * in the start-IRQ-handler. Protection against reentrance * from the same interrupt is still provided, both by the * generic IRQ layer and by the fact that an unacked local * APIC does not accept IRQs. */ static unsigned int startup_level_ioapic_irq (unsigned int irq) { unmask_IO_APIC_irq(irq); return 0; /* don't check for pending */ } int ioapic_ack_new = 1; static void setup_ioapic_ack(char *s) { if ( !strcmp(s, "old") ) ioapic_ack_new = 0; else if ( !strcmp(s, "new") ) ioapic_ack_new = 1; else printk("Unknown ioapic_ack value specified: '%s'\n", s); } custom_param("ioapic_ack", setup_ioapic_ack); static void mask_and_ack_level_ioapic_irq (unsigned int irq) { unsigned long v; int i; if ( ioapic_ack_new ) return; mask_IO_APIC_irq(irq); /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various * chipsets). Under certain conditions a level-triggered interrupt is * erroneously delivered as edge-triggered one but the respective IRR * bit gets set nevertheless. As a result the I/O unit expects an EOI * message but it will never arrive and further interrupts are blocked * from the source. The exact reason is so far unknown, but the * phenomenon was observed when two consecutive interrupt requests * from a given source get delivered to the same CPU and the source is * temporarily disabled in between. * * A workaround is to simulate an EOI message manually. We achieve it * by setting the trigger mode to edge and then to level when the edge * trigger mode gets detected in the TMR of a local APIC for a * level-triggered interrupt. We mask the source for the time of the * operation to prevent an edge-triggered interrupt escaping meanwhile. * The idea is from Manfred Spraul. --macro */ i = IO_APIC_VECTOR(irq); v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); ack_APIC_irq(); if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); spin_lock(&ioapic_lock); __edge_IO_APIC_irq(irq); __level_IO_APIC_irq(irq); spin_unlock(&ioapic_lock); } } static void end_level_ioapic_irq (unsigned int irq) { unsigned long v; int i; if ( !ioapic_ack_new ) { if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) ) unmask_IO_APIC_irq(irq); return; } /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various * chipsets). 
Under certain conditions a level-triggered interrupt is * erroneously delivered as edge-triggered one but the respective IRR * bit gets set nevertheless. As a result the I/O unit expects an EOI * message but it will never arrive and further interrupts are blocked * from the source. The exact reason is so far unknown, but the * phenomenon was observed when two consecutive interrupt requests * from a given source get delivered to the same CPU and the source is * temporarily disabled in between. * * A workaround is to simulate an EOI message manually. We achieve it * by setting the trigger mode to edge and then to level when the edge * trigger mode gets detected in the TMR of a local APIC for a * level-triggered interrupt. We mask the source for the time of the * operation to prevent an edge-triggered interrupt escaping meanwhile. * The idea is from Manfred Spraul. --macro */ i = IO_APIC_VECTOR(irq); v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); ack_APIC_irq(); if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); spin_lock(&ioapic_lock); __mask_IO_APIC_irq(irq); __edge_IO_APIC_irq(irq); __level_IO_APIC_irq(irq); if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) ) __unmask_IO_APIC_irq(irq); spin_unlock(&ioapic_lock); } } static unsigned int startup_edge_ioapic_vector(unsigned int vector) { int irq = vector_to_irq(vector); return startup_edge_ioapic_irq(irq); } static void ack_edge_ioapic_vector(unsigned int vector) { int irq = vector_to_irq(vector); ack_edge_ioapic_irq(irq); } static unsigned int startup_level_ioapic_vector(unsigned int vector) { int irq = vector_to_irq(vector); return startup_level_ioapic_irq (irq); } static void mask_and_ack_level_ioapic_vector(unsigned int vector) { int irq = vector_to_irq(vector); mask_and_ack_level_ioapic_irq(irq); } static void end_level_ioapic_vector(unsigned int vector) { int irq = vector_to_irq(vector); end_level_ioapic_irq(irq); } static void mask_IO_APIC_vector(unsigned int vector) { int irq = vector_to_irq(vector); mask_IO_APIC_irq(irq); } static void unmask_IO_APIC_vector(unsigned int vector) { int irq = vector_to_irq(vector); unmask_IO_APIC_irq(irq); } static void set_ioapic_affinity_vector( unsigned int vector, cpumask_t cpu_mask) { int irq = vector_to_irq(vector); set_native_irq_info(vector, cpu_mask); set_ioapic_affinity_irq(irq, cpu_mask); } static void disable_edge_ioapic_vector(unsigned int vector) { } static void end_edge_ioapic_vector(unsigned int vector) { } /* * Level and edge triggered IO-APIC interrupts need different handling, * so we use two separate IRQ descriptors. Edge triggered IRQs can be * handled with the level-triggered descriptor, but that one has slightly * more overhead. Level-triggered interrupts cannot be handled with the * edge-triggered handler, without risking IRQ storms and other ugly * races. 
*/ static struct hw_interrupt_type ioapic_edge_type = { .typename = "IO-APIC-edge", .startup = startup_edge_ioapic_vector, .shutdown = disable_edge_ioapic_vector, .enable = unmask_IO_APIC_vector, .disable = disable_edge_ioapic_vector, .ack = ack_edge_ioapic_vector, .end = end_edge_ioapic_vector, .set_affinity = set_ioapic_affinity_vector, }; static struct hw_interrupt_type ioapic_level_type = { .typename = "IO-APIC-level", .startup = startup_level_ioapic_vector, .shutdown = mask_IO_APIC_vector, .enable = unmask_IO_APIC_vector, .disable = mask_IO_APIC_vector, .ack = mask_and_ack_level_ioapic_vector, .end = end_level_ioapic_vector, .set_affinity = set_ioapic_affinity_vector, }; static inline void init_IO_APIC_traps(void) { int irq; /* Xen: This is way simpler than the Linux implementation. */ for (irq = 0; irq < 16 ; irq++) if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq)) make_8259A_irq(irq); } static void enable_lapic_vector(unsigned int vector) { unsigned long v; v = apic_read(APIC_LVT0); apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED); } static void disable_lapic_vector(unsigned int vector) { unsigned long v; v = apic_read(APIC_LVT0); apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); } static void ack_lapic_vector(unsigned int vector) { ack_APIC_irq(); } static void end_lapic_vector(unsigned int vector) { /* nothing */ } static struct hw_interrupt_type lapic_irq_type = { .typename = "local-APIC-edge", .startup = NULL, /* startup_irq() not used for IRQ0 */ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ .enable = enable_lapic_vector, .disable = disable_lapic_vector, .ack = ack_lapic_vector, .end = end_lapic_vector }; /* * This looks a bit hackish but it's about the only one way of sending * a few INTA cycles to 8259As and any associated glue logic. ICR does * not support the ExtINT mode, unfortunately. We need to send these * cycles as some i82489DX-based boards have glue logic that keeps the * 8259A interrupt line asserted until INTA. 
--macro */ static inline void unlock_ExtINT_logic(void) { int apic, pin, i; struct IO_APIC_route_entry entry0, entry1; unsigned char save_control, save_freq_select; unsigned long flags; pin = find_isa_irq_pin(8, mp_INT); apic = find_isa_irq_apic(8, mp_INT); if (pin == -1) return; spin_lock_irqsave(&ioapic_lock, flags); *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); spin_unlock_irqrestore(&ioapic_lock, flags); clear_IO_APIC_pin(apic, pin); memset(&entry1, 0, sizeof(entry1)); entry1.dest_mode = 0; /* physical delivery */ entry1.mask = 0; /* unmask IRQ now */ entry1.dest.physical.physical_dest = hard_smp_processor_id(); entry1.delivery_mode = dest_ExtINT; entry1.polarity = entry0.polarity; entry1.trigger = 0; entry1.vector = 0; spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); spin_unlock_irqrestore(&ioapic_lock, flags); save_control = CMOS_READ(RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, RTC_FREQ_SELECT); CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); i = 100; while (i-- > 0) { mdelay(10); if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) i -= 10; } CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); clear_IO_APIC_pin(apic, pin); spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); spin_unlock_irqrestore(&ioapic_lock, flags); } int timer_uses_ioapic_pin_0; /* * This code may look a bit paranoid, but it's supposed to cooperate with * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast * fanatically on his truly buggy board. */ static inline void check_timer(void) { int apic1, pin1, apic2, pin2; int vector; /* * get/set the timer IRQ vector: */ disable_8259A_irq(0); vector = assign_irq_vector(0); irq_desc[IO_APIC_VECTOR(0)].action = irq_desc[LEGACY_VECTOR(0)].action; irq_desc[IO_APIC_VECTOR(0)].depth = 0; irq_desc[IO_APIC_VECTOR(0)].status &= ~IRQ_DISABLED; /* * Subtle, code in do_timer_interrupt() expects an AEOI * mode for the 8259A whenever interrupts are routed * through I/O APICs. Also IRQ0 has to be enabled in * the 8259A which implies the virtual wire has to be * disabled in the local APIC. */ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); init_8259A(1); /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */ /*timer_ack = 1;*/ /*enable_8259A_irq(0);*/ pin1 = find_isa_irq_pin(0, mp_INT); apic1 = find_isa_irq_apic(0, mp_INT); pin2 = ioapic_i8259.pin; apic2 = ioapic_i8259.apic; if (pin1 == 0) timer_uses_ioapic_pin_0 = 1; printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", vector, apic1, pin1, apic2, pin2); if (pin1 != -1) { /* * Ok, does IRQ0 through the IOAPIC work? */ unmask_IO_APIC_irq(0); if (timer_irq_works()) { if (disable_timer_pin_1 > 0) clear_IO_APIC_pin(apic1, pin1); return; } clear_IO_APIC_pin(apic1, pin1); printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " "IO-APIC\n"); } printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); if (pin2 != -1) { printk("\n..... 
(found pin %d) ...", pin2); /* * legacy devices should be connected to IO APIC #0 */ setup_ExtINT_IRQ0_pin(apic2, pin2, vector); if (timer_irq_works()) { printk("works.\n"); if (pin1 != -1) replace_pin_at_irq(0, apic1, pin1, apic2, pin2); else add_pin_to_irq(0, apic2, pin2); return; } /* * Cleanup, just in case ... */ clear_IO_APIC_pin(apic2, pin2); } printk(" failed.\n"); if (nmi_watchdog == NMI_IO_APIC) { printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); nmi_watchdog = 0; } printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); disable_8259A_irq(0); irq_desc[vector].handler = &lapic_irq_type; apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ enable_8259A_irq(0); if (timer_irq_works()) { printk(" works.\n"); return; } apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); printk(" failed.\n"); printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); /*timer_ack = 0;*/ init_8259A(0); make_8259A_irq(0); apic_write_around(APIC_LVT0, APIC_DM_EXTINT); unlock_ExtINT_logic(); if (timer_irq_works()) { printk(" works.\n"); return; } printk(" failed :(.\n"); panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. Then try booting with the 'noapic' option"); } /* * * IRQ's that are handled by the PIC in the MPS IOAPIC case. * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. * Linux doesn't really care, as it's not actually used * for any interrupt handling anyway. */ #define PIC_IRQS (1 << PIC_CASCADE_IR) void __init setup_IO_APIC(void) { enable_IO_APIC(); if (acpi_ioapic) io_apic_irqs = ~0; /* all IRQs go through IOAPIC */ else io_apic_irqs = ~PIC_IRQS; printk("ENABLING IO-APIC IRQs\n"); printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old"); /* * Set up IO-APIC IRQ routing. */ if (!acpi_ioapic) setup_ioapic_ids_from_mpc(); sync_Arb_IDs(); setup_IO_APIC_irqs(); init_IO_APIC_traps(); check_timer(); print_IO_APIC(); register_keyhandler('z', print_IO_APIC_keyhandler, "print ioapic info"); } /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -------------------------------------------------------------------------- */ #ifdef CONFIG_ACPI_BOOT int __init io_apic_get_unique_id (int ioapic, int apic_id) { union IO_APIC_reg_00 reg_00; static physid_mask_t apic_id_map = PHYSID_MASK_NONE; physid_mask_t tmp; unsigned long flags; int i = 0; /* * The P4 platform supports up to 256 APIC IDs on two separate APIC * buses (one for LAPICs, one for IOAPICs), where predecessors only * supports up to 16 on one shared APIC bus. * * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full * advantage of new APIC bus architecture. */ if (physids_empty(apic_id_map)) apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); if (apic_id >= get_physical_broadcast()) { printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " "%d\n", ioapic, apic_id, reg_00.bits.ID); apic_id = reg_00.bits.ID; } /* * Every APIC in a system must have a unique ID or we get lots of nice * 'stuck on smp_invalidate_needed IPI wait' messages. 
*/ if (check_apicid_used(apic_id_map, apic_id)) { for (i = 0; i < get_physical_broadcast(); i++) { if (!check_apicid_used(apic_id_map, i)) break; } if (i == get_physical_broadcast()) panic("Max apic_id exceeded!\n"); printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " "trying %d\n", ioapic, apic_id, i); apic_id = i; } tmp = apicid_to_cpu_present(apic_id); physids_or(apic_id_map, apic_id_map, tmp); if (reg_00.bits.ID != apic_id) { reg_00.bits.ID = apic_id; spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic, 0, reg_00.raw); reg_00.raw = io_apic_read(ioapic, 0); spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ if (reg_00.bits.ID != apic_id) { printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); return -1; } } apic_printk(APIC_VERBOSE, KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); return apic_id; } int __init io_apic_get_version (int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.version; } int __init io_apic_get_redir_entries (int ioapic) { union IO_APIC_reg_01 reg_01; unsigned long flags; spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.entries; } int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) { struct IO_APIC_route_entry entry; unsigned long flags; if (!IO_APIC_IRQ(irq)) { printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", ioapic); return -EINVAL; } /* * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. * Note that we mask (disable) IRQs now -- these get enabled when the * corresponding device driver registers for this IRQ. */ memset(&entry,0,sizeof(entry)); entry.delivery_mode = INT_DELIVERY_MODE; entry.dest_mode = INT_DEST_MODE; entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); entry.trigger = edge_level; entry.polarity = active_high_low; entry.mask = 1; /* * IRQs < 16 are already in the irq_2_pin[] map */ if (irq >= 16) add_pin_to_irq(irq, ioapic, pin); entry.vector = assign_irq_vector(irq); apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic, mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq, edge_level, active_high_low); ioapic_register_intr(irq, entry.vector, edge_level); if (!ioapic && (irq < 16)) disable_8259A_irq(irq); spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); set_native_irq_info(entry.vector, TARGET_CPUS); spin_unlock_irqrestore(&ioapic_lock, flags); return 0; } #endif /*CONFIG_ACPI_BOOT*/ static int ioapic_physbase_to_id(unsigned long physbase) { int apic; for ( apic = 0; apic < nr_ioapics; apic++ ) if ( mp_ioapics[apic].mpc_apicaddr == physbase ) return apic; return -EINVAL; } int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval) { int apic; unsigned long flags; if ( (apic = ioapic_physbase_to_id(physbase)) < 0 ) return apic; spin_lock_irqsave(&ioapic_lock, flags); *pval = io_apic_read(apic, reg); spin_unlock_irqrestore(&ioapic_lock, flags); return 0; } #define WARN_BOGUS_WRITE(f, a...) 
    dprintk(XENLOG_INFO, "\n%s: "                                        \
            "apic=%d, pin=%d, old_irq=%d, new_irq=%d\n"                  \
            "%s: old_entry=%08x, new_entry=%08x\n"                       \
            "%s: " f, __FUNCTION__, apic, pin, old_irq, new_irq,         \
            __FUNCTION__, *(u32 *)&old_rte, *(u32 *)&new_rte,            \
            __FUNCTION__ , ##a )

int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
{
    int apic, pin, old_irq = -1, new_irq = -1;
    struct IO_APIC_route_entry old_rte = { 0 }, new_rte = { 0 };
    unsigned long flags;

    if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
        return apic;

    /* Only write to the first half of a route entry. */
    if ( (reg < 0x10) || (reg & 1) )
        return 0;

    pin = (reg - 0x10) >> 1;

    /* Write first half from guest; second half is target info. */
    *(u32 *)&new_rte = val;
    new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);

    /*
     * What about weird destination types?
     *  SMI:    Ignore? Ought to be set up by the BIOS.
     *  NMI:    Ignore? Watchdog functionality is Xen's concern.
     *  INIT:   Definitely ignore: probably a guest OS bug.
     *  ExtINT: Ignore? Linux only asserts this at start of day.
     * For now, print a message and return an error. We can fix up on demand.
     */
    if ( new_rte.delivery_mode > dest_LowestPrio )
    {
        printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
        printk("       APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
        return -EINVAL;
    }

    /*
     * The guest does not know physical APIC arrangement (flat vs. cluster).
     * Apply genapic conventions for this platform.
     */
    new_rte.delivery_mode = INT_DELIVERY_MODE;
    new_rte.dest_mode = INT_DEST_MODE;

    spin_lock_irqsave(&ioapic_lock, flags);

    /* Read first (interesting) half of current routing entry. */
    *(u32 *)&old_rte = io_apic_read(apic, 0x10 + 2 * pin);

    /* No change to the first half of the routing entry? Bail quietly. */
    if ( *(u32 *)&old_rte == *(u32 *)&new_rte )
    {
        spin_unlock_irqrestore(&ioapic_lock, flags);
        return 0;
    }

    if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
        old_irq = vector_irq[old_rte.vector];
    if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
        new_irq = vector_irq[new_rte.vector];

    if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
    {
        if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
        {
            WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
            spin_unlock_irqrestore(&ioapic_lock, flags);
            return 0;
        }

        remove_pin_at_irq(old_irq, apic, pin);
    }

    if ( (new_irq != -1) && IO_APIC_IRQ(new_irq) )
    {
        if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
        {
            WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
                             (old_irq != new_irq) ? "add" : "modify");
            spin_unlock_irqrestore(&ioapic_lock, flags);
            return 0;
        }

        /* Set the correct irq-handling type. */
        irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ?
            &ioapic_level_type : &ioapic_edge_type;

        if ( old_irq != new_irq )
            add_pin_to_irq(new_irq, apic, pin);

        /* Mask iff level triggered. */
        new_rte.mask = new_rte.trigger;
    }
    else if ( !new_rte.mask )
    {
        /* This pin leads nowhere but the guest has not masked it.
         */
        WARN_BOGUS_WRITE("Installing bogus unmasked IO-APIC entry!\n");
        new_rte.mask = 1;
    }

    io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
    io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));

    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}

void dump_ioapic_irq_info(void)
{
    struct irq_pin_list *entry;
    struct IO_APIC_route_entry rte;
    unsigned int irq, pin, printed = 0;
    unsigned long flags;

    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        entry = &irq_2_pin[irq];
        if ( entry->pin == -1 )
            continue;

        if ( !printed++ )
            printk("IO-APIC interrupt information:\n");

        printk("    IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq));

        for ( ; ; )
        {
            pin = entry->pin;

            printk("      Apic 0x%02x, Pin %2d: ", entry->apic, pin);

            spin_lock_irqsave(&ioapic_lock, flags);
            *(((int *)&rte) + 0) = io_apic_read(entry->apic, 0x10 + 2 * pin);
            *(((int *)&rte) + 1) = io_apic_read(entry->apic, 0x11 + 2 * pin);
            spin_unlock_irqrestore(&ioapic_lock, flags);

            printk("vector=%u, delivery_mode=%u, dest_mode=%s, "
                   "delivery_status=%d, polarity=%d, irr=%d, "
                   "trigger=%s, mask=%d\n",
                   rte.vector, rte.delivery_mode,
                   rte.dest_mode ? "logical" : "physical",
                   rte.delivery_status, rte.polarity, rte.irr,
                   rte.trigger ? "level" : "edge", rte.mask);

            if ( entry->next == 0 )
                break;
            entry = &irq_2_pin[entry->next];
        }
    }
}
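
/*
 * Illustrative sketch only, not part of the original file: it restates the
 * locking and register-pair convention used above in ioapic_guest_write()
 * and dump_ioapic_irq_info(). Each redirection entry for 'pin' is spread
 * across two 32-bit registers, 0x10 + 2*pin (low half) and 0x11 + 2*pin
 * (high half), and both halves are accessed under ioapic_lock. The helper
 * name is hypothetical and the block is kept inside #if 0 so it is never
 * built.
 */
#if 0
static struct IO_APIC_route_entry ioapic_read_entry_sketch(int apic, int pin)
{
    struct IO_APIC_route_entry rte;
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    *(((int *)&rte) + 0) = io_apic_read(apic, 0x10 + 2 * pin); /* low half */
    *(((int *)&rte) + 1) = io_apic_read(apic, 0x11 + 2 * pin); /* high half */
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return rte;
}
#endif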