/*
    ChibiOS - Copyright (C) 2006..2018 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/**
 * @file    rt/templates/chconf.h
 * @brief   Configuration file template.
 * @details A copy of this file must be placed in each project directory; it
 *          contains the application-specific kernel settings.
 *
 * @addtogroup config
 * @details Kernel related settings and hooks.
 * @{
 */

#ifndef CHCONF_H
#define CHCONF_H

#define _CHIBIOS_RT_CONF_
#define _CHIBIOS_RT_CONF_VER_6_0_

/*===========================================================================*/
/**
 * @name System timers settings
 * @{
 */
/*===========================================================================*/

/**
 * @brief   System time counter resolution.
 * @note    Allowed values are 16 or 32 bits.
 */
#if !defined(CH_CFG_ST_RESOLUTION)
#define CH_CFG_ST_RESOLUTION                16
#endif

/**
 * @brief   System tick frequency.
 * @details Frequency of the system timer that drives the system ticks. This
 *          setting also defines the system tick time unit.
 */
#if !defined(CH_CFG_ST_FREQUENCY)
#define CH_CFG_ST_FREQUENCY                 1000
#endif
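
/*
 * Illustration (not part of the template): with the 1000Hz tick selected
 * above, the time-conversion macros map milliseconds to ticks 1:1, e.g.
 *
 *   chThdSleep(TIME_MS2I(50));    // 50ms -> 50 ticks at 1000Hz
 */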

/**
 * @brief   Time intervals data size.
 * @note    Allowed values are 16, 32 or 64 bits.
 */
#if !defined(CH_CFG_INTERVALS_SIZE)
#define CH_CFG_INTERVALS_SIZE               32
#endif

/**
 * @brief   Time types data size.
 * @note    Allowed values are 16 or 32 bits.
 */
#if !defined(CH_CFG_TIME_TYPES_SIZE)
#define CH_CFG_TIME_TYPES_SIZE              32
#endif

/**
 * @brief   Time delta constant for the tick-less mode.
 * @note    If this value is zero then the system uses the classic
 *          periodic tick. This value represents the minimum number
 *          of ticks that is safe to specify in a timeout directive.
 *          The value one is not valid; timeouts are rounded up to
 *          this value.
 */
#if !defined(CH_CFG_ST_TIMEDELTA)
#define CH_CFG_ST_TIMEDELTA                 2
#endif
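
/*
 * Illustration (assumption based on the note above): with the delta set
 * to 2, a one-tick timeout such as chThdSleep(1) is rounded up to 2
 * ticks, the shortest interval the tick-less timer can honour.
 */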

/** @} */

/*===========================================================================*/
/**
 * @name Kernel parameters and options
 * @{
 */
/*===========================================================================*/

/**
 * @brief   Round robin interval.
 * @details This constant is the number of system ticks allowed for the
 *          threads before preemption occurs. Setting this value to zero
 *          disables the preemption for threads with equal priority and the
 *          round robin becomes cooperative. Note that higher priority
 *          threads can still preempt, the kernel is always preemptive.
 * @note    Disabling the round robin preemption makes the kernel more compact
 *          and generally faster.
 * @note    The round robin preemption is not supported in tickless mode and
 *          must be set to zero in that case.
 */
#if !defined(CH_CFG_TIME_QUANTUM)
#define CH_CFG_TIME_QUANTUM                 0
#endif

/**
 * @brief   Managed RAM size.
 * @details Size of the RAM area to be managed by the OS. If set to zero
 *          then the whole available RAM is used. The core memory is made
 *          available to the heap allocator and/or can be used directly through
 *          the simplified core memory allocator.
 *
 * @note    In order to let the OS manage the whole RAM the linker script must
 *          provide the @p __heap_base__ and @p __heap_end__ symbols.
 * @note    Requires @p CH_CFG_USE_MEMCORE.
 */
#if !defined(CH_CFG_MEMCORE_SIZE)
#define CH_CFG_MEMCORE_SIZE                 0
#endif

/**
 * @brief   Idle thread automatic spawn suppression.
 * @details When this option is activated the function @p chSysInit()
 *          does not spawn the idle thread. The application @p main()
 *          function becomes the idle thread and must implement an
 *          infinite loop.
 */
#if !defined(CH_CFG_NO_IDLE_THREAD)
#define CH_CFG_NO_IDLE_THREAD               FALSE
#endif
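
/*
 * Sketch (hypothetical) of a main() acting as the idle thread when the
 * option above is set to TRUE: after chSysInit() it must never return
 * and must never block.
 *
 *   int main(void) {
 *     halInit();
 *     chSysInit();
 *     // ...create the application threads here...
 *     while (true) {
 *       // idle-level work only, no blocking APIs
 *     }
 *   }
 */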

/** @} */

/*===========================================================================*/
/**
 * @name Performance options
 * @{
 */
/*===========================================================================*/

/**
 * @brief   OS optimization.
 * @details If enabled then time efficient rather than space efficient code
 *          is used when two possible implementations exist.
 *
 * @note    This is not related to the compiler optimization options.
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_OPTIMIZE_SPEED)
#define CH_CFG_OPTIMIZE_SPEED               TRUE
#endif

/** @} */

/*===========================================================================*/
/**
 * @name Subsystem options
 * @{
 */
/*===========================================================================*/

/**
 * @brief   Time Measurement APIs.
 * @details If enabled then the time measurement APIs are included in
 *          the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_TM)
#define CH_CFG_USE_TM                       TRUE
#endif

/**
 * @brief   Threads registry APIs.
 * @details If enabled then the registry APIs are included in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_REGISTRY)
#define CH_CFG_USE_REGISTRY                 TRUE
#endif

/**
 * @brief   Threads synchronization APIs.
 * @details If enabled then the @p chThdWait() function is included in
 *          the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_WAITEXIT)
#define CH_CFG_USE_WAITEXIT                 TRUE
#endif

/**
 * @brief   Semaphores APIs.
 * @details If enabled then the Semaphores APIs are included in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_SEMAPHORES)
#define CH_CFG_USE_SEMAPHORES               TRUE
#endif

/**
 * @brief   Semaphores queuing mode.
 * @details If enabled then the threads are enqueued on semaphores by
 *          priority rather than in FIFO order.
 *
 * @note    The default is @p FALSE. Enable this if you have special
 *          requirements.
 * @note    Requires @p CH_CFG_USE_SEMAPHORES.
 */
#if !defined(CH_CFG_USE_SEMAPHORES_PRIORITY)
#define CH_CFG_USE_SEMAPHORES_PRIORITY      FALSE
#endif

/**
 * @brief   Mutexes APIs.
 * @details If enabled then the mutexes APIs are included in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_MUTEXES)
#define CH_CFG_USE_MUTEXES                  TRUE
#endif

/**
 * @brief   Enables recursive behavior on mutexes.
 * @note    Recursive mutexes are heavier and have an increased
 *          memory footprint.
 *
 * @note    The default is @p FALSE.
 * @note    Requires @p CH_CFG_USE_MUTEXES.
 */
#if !defined(CH_CFG_USE_MUTEXES_RECURSIVE)
#define CH_CFG_USE_MUTEXES_RECURSIVE        FALSE
#endif

/**
 * @brief   Conditional Variables APIs.
 * @details If enabled then the conditional variables APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_MUTEXES.
 */
#if !defined(CH_CFG_USE_CONDVARS)
#define CH_CFG_USE_CONDVARS                 TRUE
#endif

/**
 * @brief   Conditional Variables APIs with timeout.
 * @details If enabled then the conditional variables APIs with timeout
 *          specification are included in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_CONDVARS.
 */
#if !defined(CH_CFG_USE_CONDVARS_TIMEOUT)
#define CH_CFG_USE_CONDVARS_TIMEOUT         TRUE
#endif

/**
 * @brief   Events Flags APIs.
 * @details If enabled then the event flags APIs are included in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_EVENTS)
#define CH_CFG_USE_EVENTS                   TRUE
#endif

/**
 * @brief   Events Flags APIs with timeout.
 * @details If enabled then the events APIs with timeout specification
 *          are included in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_EVENTS.
 */
#if !defined(CH_CFG_USE_EVENTS_TIMEOUT)
#define CH_CFG_USE_EVENTS_TIMEOUT           TRUE
#endif

/**
 * @brief   Synchronous Messages APIs.
 * @details If enabled then the synchronous messages APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_MESSAGES)
#define CH_CFG_USE_MESSAGES                 TRUE
#endif

/**
 * @brief   Synchronous Messages queuing mode.
 * @details If enabled then messages are served by priority rather than in
 *          FIFO order.
 *
 * @note    The default is @p FALSE. Enable this if you have special
 *          requirements.
 * @note    Requires @p CH_CFG_USE_MESSAGES.
 */
#if !defined(CH_CFG_USE_MESSAGES_PRIORITY)
#define CH_CFG_USE_MESSAGES_PRIORITY        FALSE
#endif

/**
 * @brief   Mailboxes APIs.
 * @details If enabled then the asynchronous messages (mailboxes) APIs are
 *          included in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_SEMAPHORES.
 */
#if !defined(CH_CFG_USE_MAILBOXES)
#define CH_CFG_USE_MAILBOXES                TRUE
#endif

/**
 * @brief   Core Memory Manager APIs.
 * @details If enabled then the core memory manager APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_MEMCORE)
#define CH_CFG_USE_MEMCORE                  TRUE
#endif

/**
 * @brief   Heap Allocator APIs.
 * @details If enabled then the memory heap allocator APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_MEMCORE and either @p CH_CFG_USE_MUTEXES or
 *          @p CH_CFG_USE_SEMAPHORES.
 * @note    Mutexes are recommended.
 */
#if !defined(CH_CFG_USE_HEAP)
#define CH_CFG_USE_HEAP                     TRUE
#endif

/**
 * @brief   Memory Pools Allocator APIs.
 * @details If enabled then the memory pools allocator APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_MEMPOOLS)
#define CH_CFG_USE_MEMPOOLS                 TRUE
#endif

/**
 * @brief   Objects FIFOs APIs.
 * @details If enabled then the objects FIFOs APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_OBJ_FIFOS)
#define CH_CFG_USE_OBJ_FIFOS                TRUE
#endif

/**
 * @brief   Pipes APIs.
 * @details If enabled then the pipes APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 */
#if !defined(CH_CFG_USE_PIPES)
#define CH_CFG_USE_PIPES                    TRUE
#endif

/**
 * @brief   Dynamic Threads APIs.
 * @details If enabled then the dynamic threads creation APIs are included
 *          in the kernel.
 *
 * @note    The default is @p TRUE.
 * @note    Requires @p CH_CFG_USE_WAITEXIT.
 * @note    Requires @p CH_CFG_USE_HEAP and/or @p CH_CFG_USE_MEMPOOLS.
 */
#if !defined(CH_CFG_USE_DYNAMIC)
#define CH_CFG_USE_DYNAMIC                  TRUE
#endif

/** @} */

/*===========================================================================*/
/**
 * @name Objects factory options
 * @{
 */
/*===========================================================================*/

/**
 * @brief   Objects Factory APIs.
 * @details If enabled then the objects factory APIs are included in the
 *          kernel.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_CFG_USE_FACTORY)
#define CH_CFG_USE_FACTORY                  TRUE
#endif

/**
 * @brief   Maximum length for object names.
 * @details If the specified length is zero then the name is stored by
 *          pointer but this could have unintended side effects.
 */
#if !defined(CH_CFG_FACTORY_MAX_NAMES_LENGTH)
#define CH_CFG_FACTORY_MAX_NAMES_LENGTH     8
#endif

/**
 * @brief   Enables the registry of generic objects.
 */
#if !defined(CH_CFG_FACTORY_OBJECTS_REGISTRY)
#define CH_CFG_FACTORY_OBJECTS_REGISTRY     TRUE
#endif

/**
 * @brief   Enables factory for generic buffers.
 */
#if !defined(CH_CFG_FACTORY_GENERIC_BUFFERS)
#define CH_CFG_FACTORY_GENERIC_BUFFERS      TRUE
#endif

/**
 * @brief   Enables factory for semaphores.
 */
#if !defined(CH_CFG_FACTORY_SEMAPHORES)
#define CH_CFG_FACTORY_SEMAPHORES           TRUE
#endif

/**
 * @brief   Enables factory for mailboxes.
 */
#if !defined(CH_CFG_FACTORY_MAILBOXES)
#define CH_CFG_FACTORY_MAILBOXES            TRUE
#endif

/**
 * @brief   Enables factory for objects FIFOs.
 */
#if !defined(CH_CFG_FACTORY_OBJ_FIFOS)
#define CH_CFG_FACTORY_OBJ_FIFOS            TRUE
#endif

/**
 * @brief   Enables factory for Pipes.
 */
#if !defined(CH_CFG_FACTORY_PIPES) || defined(__DOXYGEN__)
#define CH_CFG_FACTORY_PIPES                TRUE
#endif

/** @} */

/*===========================================================================*/
/**
 * @name Debug options
 * @{
 */
/*===========================================================================*/

/**
 * @brief   Debug option, kernel statistics.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_DBG_STATISTICS)
#define CH_DBG_STATISTICS                   FALSE
#endif

/**
 * @brief   Debug option, system state check.
 * @details If enabled the correct call protocol for system APIs is checked
 *          at runtime.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_DBG_SYSTEM_STATE_CHECK)
#define CH_DBG_SYSTEM_STATE_CHECK           TRUE
#endif

/**
 * @brief   Debug option, parameters checks.
 * @details If enabled then the checks on the API functions input
 *          parameters are activated.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_DBG_ENABLE_CHECKS)
#define CH_DBG_ENABLE_CHECKS                TRUE
#endif

/**
 * @brief   Debug option, consistency checks.
 * @details If enabled then all the assertions in the kernel code are
 *          activated. This includes consistency checks inside the kernel,
 *          runtime anomalies and port-defined checks.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_DBG_ENABLE_ASSERTS)
#define CH_DBG_ENABLE_ASSERTS               TRUE
#endif

/**
 * @brief   Debug option, trace buffer.
 * @details If enabled then the trace buffer is activated.
 *
 * @note    The default is @p CH_DBG_TRACE_MASK_DISABLED.
 */
#if !defined(CH_DBG_TRACE_MASK)
#define CH_DBG_TRACE_MASK                   CH_DBG_TRACE_MASK_ALL
#endif

/**
 * @brief   Trace buffer entries.
 * @note    The trace buffer is only allocated if @p CH_DBG_TRACE_MASK is
 *          different from @p CH_DBG_TRACE_MASK_DISABLED.
 */
#if !defined(CH_DBG_TRACE_BUFFER_SIZE)
#define CH_DBG_TRACE_BUFFER_SIZE            128
#endif

/**
 * @brief   Debug option, stack checks.
 * @details If enabled then a runtime stack check is performed.
 *
 * @note    The default is @p FALSE.
 * @note    The stack check is performed in an architecture/port-dependent
 *          way. It may not be implemented on some ports.
 * @note    The default failure mode is to halt the system with the global
 *          @p panic_msg variable set to @p NULL.
 */
#if !defined(CH_DBG_ENABLE_STACK_CHECK)
#define CH_DBG_ENABLE_STACK_CHECK           TRUE
#endif

/**
 * @brief   Debug option, stacks initialization.
 * @details If enabled then the threads working area is filled with a byte
 *          value when a thread is created. This can be useful for the
 *          runtime measurement of the used stack.
 *
 * @note    The default is @p FALSE.
 */
#if !defined(CH_DBG_FILL_THREADS)
#define CH_DBG_FILL_THREADS                 TRUE
#endif

/**
 * @brief   Debug option, threads profiling.
 * @details If enabled then a field is added to the @p thread_t structure that
 *          counts the system ticks occurred while executing the thread.
 *
 * @note    The default is @p FALSE.
 * @note    This debug option is not currently compatible with the
 *          tickless mode.
 */
#if !defined(CH_DBG_THREADS_PROFILING)
#define CH_DBG_THREADS_PROFILING            FALSE
#endif

/** @} */

/*===========================================================================*/
/**
 * @name Kernel hooks
 * @{
 */
/*===========================================================================*/

/**
 * @brief   System structure extension.
 * @details User fields added to the end of the @p ch_system_t structure.
 */
#define CH_CFG_SYSTEM_EXTRA_FIELDS                                          \
  /* Add threads custom fields here.*/

/**
 * @brief   System initialization hook.
 * @details User initialization code added to the @p chSysInit() function
 *          just before interrupts are enabled globally.
 */
#define CH_CFG_SYSTEM_INIT_HOOK() {                                         \
  /* Add threads initialization code here.*/                                \
}

/**
 * @brief   Threads descriptor structure extension.
 * @details User fields added to the end of the @p thread_t structure.
 */
#define CH_CFG_THREAD_EXTRA_FIELDS                                          \
  /* Add threads custom fields here.*/

/**
 * @brief   Threads initialization hook.
 * @details User initialization code added to the @p _thread_init() function.
 *
 * @note    It is invoked from within @p _thread_init() and implicitly from all
 *          the threads creation APIs.
 */
#define CH_CFG_THREAD_INIT_HOOK(tp) {                                       \
  /* Add threads initialization code here.*/                                \
}
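
/*
 * Sketch (hypothetical): the field extension and init hook above are
 * typically used together, e.g. adding a per-thread counter and zeroing
 * it at creation time (ctx_switches is an invented example field).
 *
 *   #define CH_CFG_THREAD_EXTRA_FIELDS                                  \
 *     uint32_t ctx_switches;
 *
 *   #define CH_CFG_THREAD_INIT_HOOK(tp) {                               \
 *     (tp)->ctx_switches = 0;                                           \
 *   }
 */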

/**
 * @brief   Threads finalization hook.
 * @details User finalization code added to the @p chThdExit() API.
 */
#define CH_CFG_THREAD_EXIT_HOOK(tp) {                                       \
  /* Add threads finalization code here.*/                                  \
}

/**
 * @brief   Context switch hook.
 * @details This hook is invoked just before switching between threads.
 */
#define CH_CFG_CONTEXT_SWITCH_HOOK(ntp, otp) {                              \
  /* Context switch code here.*/                                            \
}

/**
 * @brief   ISR enter hook.
 */
#define CH_CFG_IRQ_PROLOGUE_HOOK() {                                        \
  /* IRQ prologue code here.*/                                              \
}

/**
 * @brief   ISR exit hook.
 */
#define CH_CFG_IRQ_EPILOGUE_HOOK() {                                        \
  /* IRQ epilogue code here.*/                                              \
}

/**
 * @brief   Idle thread enter hook.
 * @note    This hook is invoked within a critical zone, no OS functions
 *          should be invoked from here.
 * @note    This macro can be used to activate a power saving mode.
 */
#define CH_CFG_IDLE_ENTER_HOOK() {                                          \
  /* Idle-enter code here.*/                                                \
}

/**
 * @brief   Idle thread leave hook.
 * @note    This hook is invoked within a critical zone, no OS functions
 *          should be invoked from here.
 * @note    This macro can be used to deactivate a power saving mode.
 */
#define CH_CFG_IDLE_LEAVE_HOOK() {                                          \
  /* Idle-leave code here.*/                                                \
}

/**
 * @brief   Idle Loop hook.
 * @details This hook is continuously invoked by the idle thread loop.
 */
#define CH_CFG_IDLE_LOOP_HOOK() {                                           \
  /* Idle loop code here.*/                                                 \
}

/**
 * @brief   System tick event hook.
 * @details This hook is invoked in the system tick handler immediately
 *          after processing the virtual timers queue.
 */
#define CH_CFG_SYSTEM_TICK_HOOK() {                                         \
  /* System tick event code here.*/                                         \
}

/**
 * @brief   System halt hook.
 * @details This hook is invoked in case of a system halting error, before
 *          the system is halted.
 */
#define CH_CFG_SYSTEM_HALT_HOOK(reason) {                                   \
  /* System halt code here.*/                                               \
}

/**
 * @brief   Trace hook.
 * @details This hook is invoked each time a new record is written in the
 *          trace buffer.
 */
#define CH_CFG_TRACE_HOOK(tep) {                                            \
  /* Trace code here.*/                                                     \
}

/** @} */

/*===========================================================================*/
/* Port-specific settings (override port settings defaulted in chcore.h).    */
/*===========================================================================*/

#endif  /* CHCONF_H */

/** @} */
/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/delay.h>
#include <xen/sched.h>
#include <xen/acpi.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/keyhandler.h>
#include <asm/mc146818rtc.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/msi.h>
#include <mach_apic.h>
#include <io_ports.h>
#include <public/physdev.h>

/* Different to Linux: our implementation can be simpler. */
#define make_8259A_irq(irq) (io_apic_irqs &= ~(1<<(irq)))

int (*ioapic_renumber_irq)(int ioapic, int irq);
atomic_t irq_mis_count;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);

int skip_ioapic_setup;

#ifndef sis_apic_bug
/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;
#endif

/*
 * # of IRQ routing registers
 */
int __read_mostly nr_ioapic_registers[MAX_IO_APICS];
int __read_mostly nr_ioapics;

int disable_timer_pin_1 __initdata;

/*
 * Rough estimation of how many shared IRQs there are; can
 * be changed anytime.
 */
#define MAX_PLUS_SHARED_IRQS nr_irqs_gsi
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + nr_irqs_gsi)

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
    int apic, pin;
    unsigned int next;
} *irq_2_pin;

static unsigned int irq_2_pin_free_entry;
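
/* Layout note: entry i of irq_2_pin heads the pin list for IRQ i; entries
 * at index nr_irqs_gsi and above form a free pool, threaded through ->next
 * and headed by irq_2_pin_free_entry, from which add_pin_to_irq() pops
 * chain entries for shared IRQs (see enable_IO_APIC() for the setup). */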

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
    struct irq_pin_list *entry = irq_2_pin + irq;

    while (entry->next) {
        BUG_ON((entry->apic == apic) && (entry->pin == pin));
        entry = irq_2_pin + entry->next;
    }
    
    BUG_ON((entry->apic == apic) && (entry->pin == pin));

    if (entry->pin != -1) {
        if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
            panic("io_apic.c: whoops");
        entry->next = irq_2_pin_free_entry;
        entry = irq_2_pin + entry->next;
        irq_2_pin_free_entry = entry->next;
        entry->next = 0;
    }
    entry->apic = apic;
    entry->pin = pin;
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
                      int oldapic, int oldpin,
                      int newapic, int newpin)
{
    struct irq_pin_list *entry = irq_2_pin + irq;

    while (1) {
        if (entry->apic == oldapic && entry->pin == oldpin) {
            entry->apic = newapic;
            entry->pin = newpin;
        }
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
}

static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
{
    struct irq_pin_list *entry = irq_2_pin + irq;
    unsigned int pin, reg;

    for (;;) {
        pin = entry->pin;
        if (pin == -1)
            break;
        reg = io_apic_read(entry->apic, 0x10 + pin*2);
        reg &= ~disable;
        reg |= enable;
        io_apic_modify(entry->apic, 0x10 + pin*2, reg);
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
}

/* mask = 1 */
static void __mask_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0x00010000, 0);
}

/* mask = 0 */
static void __unmask_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0, 0x00010000);
}

/* trigger = 0 */
static void __edge_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0, 0x00008000);
}

/* trigger = 1 */
static void __level_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0x00008000, 0);
}

static void mask_IO_APIC_irq (unsigned int irq)
{
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    __mask_IO_APIC_irq(irq);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq (unsigned int irq)
{
    unsigned long flags;

    spin_lock_irqsave(&ioapic_lock, flags);
    __unmask_IO_APIC_irq(irq);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void __eoi_IO_APIC_irq(unsigned int irq)
{
    struct irq_pin_list *entry = irq_2_pin + irq;
    unsigned int pin, vector = IO_APIC_VECTOR(irq);

    for (;;) {
        pin = entry->pin;
        if (pin == -1)
            break;
        io_apic_eoi(entry->apic, vector);
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
}

static void eoi_IO_APIC_irq(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&ioapic_lock, flags);
    __eoi_IO_APIC_irq(irq);
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

#define clear_IO_APIC_pin(a,p)     __clear_IO_APIC_pin(a,p,0)
#define clear_IO_APIC_pin_raw(a,p) __clear_IO_APIC_pin(a,p,1)
static void __clear_IO_APIC_pin(unsigned int apic, unsigned int pin, int raw)
{
    unsigned int (*read)(unsigned int, unsigned int)
        = raw ? __io_apic_read : io_apic_read;
    void (*write)(unsigned int, unsigned int, unsigned int)
        = raw ? __io_apic_write : io_apic_write;
    struct IO_APIC_route_entry entry;
    unsigned long flags;
    
    /* Check delivery_mode to be sure we're not clearing an SMI pin */
    spin_lock_irqsave(&ioapic_lock, flags);
    *(((int*)&entry) + 0) = (*read)(apic, 0x10 + 2 * pin);
    *(((int*)&entry) + 1) = (*read)(apic, 0x11 + 2 * pin);
    spin_unlock_irqrestore(&ioapic_lock, flags);
    if (entry.delivery_mode == dest_SMI)
        return;

    /*
     * Disable it in the IO-APIC irq-routing table:
     */
    memset(&entry, 0, sizeof(entry));
    entry.mask = 1;
    spin_lock_irqsave(&ioapic_lock, flags);
    (*write)(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
    (*write)(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
    spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC (void)
{
    int apic, pin;

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            clear_IO_APIC_pin(apic, pin);
            clear_IO_APIC_pin_raw(apic, pin);
        }
    }
}

#ifdef CONFIG_SMP
fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
{
    unsigned vector, me;
    struct cpu_user_regs *old_regs = set_irq_regs(regs);

    ack_APIC_irq();
    irq_enter();

    me = smp_processor_id();
    for (vector = FIRST_DYNAMIC_VECTOR; vector < NR_VECTORS; vector++) {
        unsigned int irq;
        unsigned int irr;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
        irq = __get_cpu_var(vector_irq)[vector];

        if (irq == -1)
            continue;

        desc = irq_to_desc(irq);
        if (!desc)
            continue;

        cfg = desc->chip_data;
        spin_lock(&desc->lock);
        if (!cfg->move_cleanup_count)
            goto unlock;

        if (vector == cfg->vector && cpu_isset(me, cfg->domain))
            goto unlock;

        irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
        /*
         * Check if the vector that needs to be cleanedup is
         * registered at the cpu's IRR. If so, then this is not
         * the best time to clean it up. Lets clean it up in the
         * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
         * to myself.
         */
        if (irr  & (1 << (vector % 32))) {
            genapic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
            goto unlock;
        }
        __get_cpu_var(vector_irq)[vector] = -1;
        cfg->move_cleanup_count--;
unlock:
        spin_unlock(&desc->lock);
    }

    irq_exit();
    set_irq_regs(old_regs);
}

static void send_cleanup_vector(struct irq_cfg *cfg)
{
    cpumask_t cleanup_mask;

    cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
    cfg->move_cleanup_count = cpus_weight(cleanup_mask);
    genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);

    cfg->move_in_progress = 0;
}

void irq_complete_move(struct irq_desc **descp)
{
    struct irq_desc *desc = *descp;
    struct irq_cfg *cfg = desc->chip_data;
    unsigned vector, me;

    if (likely(!cfg->move_in_progress))
        return;

    vector = get_irq_regs()->entry_vector;
    me = smp_processor_id();

    if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
        send_cleanup_vector(cfg);
}

unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
{
    struct irq_cfg *cfg;
    unsigned int irq;
    int ret;
    unsigned long flags;
    cpumask_t dest_mask;

    if (!cpus_intersects(mask, cpu_online_map))
        return BAD_APICID;

    irq = desc->irq;
    cfg = desc->chip_data;

    local_irq_save(flags);
    lock_vector_lock();
    ret = __assign_irq_vector(irq, cfg, mask);
    unlock_vector_lock();
    local_irq_restore(flags);

    if (ret < 0)
        return BAD_APICID;

    cpus_copy(desc->affinity, mask);
    cpus_and(dest_mask, desc->affinity, cfg->domain);

    return cpu_mask_to_apicid(dest_mask);
}

static void
set_ioapic_affinity_irq_desc(struct irq_desc *desc,
                                        const struct cpumask mask)
{
    unsigned long flags;
    unsigned int dest;
    int pin, irq;
    struct irq_cfg *cfg;
    struct irq_pin_list *entry;

    irq = desc->irq;
    cfg = desc->chip_data;

    spin_lock_irqsave(&ioapic_lock, flags);
    dest = set_desc_affinity(desc, mask);
    if (dest != BAD_APICID) {
        if ( !x2apic_enabled )
            dest = SET_APIC_LOGICAL_ID(dest);
        entry = irq_2_pin + irq;
        for (;;) {
            unsigned int data;
            pin = entry->pin;
            if (pin == -1)
                break;

            io_apic_write(entry->apic, 0x10 + 1 + pin*2, dest);
            data = io_apic_read(entry->apic, 0x10 + pin*2);
            data &= ~IO_APIC_REDIR_VECTOR_MASK;
            data |= cfg->vector & 0xFF;
            io_apic_modify(entry->apic, 0x10 + pin*2, data);

            if (!entry->next)
                break;
            entry = irq_2_pin + entry->next;
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);

}

static void
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask mask)
{
    struct irq_desc *desc;

    desc = irq_to_desc(irq);

    set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++)
        if (mp_irqs[i].mpc_irqtype == type &&
            (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
             mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
            mp_irqs[i].mpc_dstirq == pin)
            return i;

    return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++) {
        int lbus = mp_irqs[i].mpc_srcbus;

        if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
             mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
             mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
             mp_bus_id_to_type[lbus] == MP_BUS_NEC98
            ) &&
            (mp_irqs[i].mpc_irqtype == type) &&
            (mp_irqs[i].mpc_srcbusirq == irq))

            return mp_irqs[i].mpc_dstirq;
    }
    return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
    int i;

    for (i = 0; i < mp_irq_entries; i++) {
        int lbus = mp_irqs[i].mpc_srcbus;

        if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
             mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
             mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
             mp_bus_id_to_type[lbus] == MP_BUS_NEC98
            ) &&
            (mp_irqs[i].mpc_irqtype == type) &&
            (mp_irqs[i].mpc_srcbusirq == irq))
            break;
    }
    if (i < mp_irq_entries) {
        int apic;
        for(apic = 0; apic < nr_ioapics; apic++) {
            if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
                return apic;
        }
    }

    return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

/*
 * This function currently is only a helper for the i386 smp boot process,
 * where we need to reprogram the ioredtbls to cater for the cpus which have
 * come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void /*__init*/ setup_ioapic_dest(void)
{
    int pin, ioapic, irq, irq_entry;
    struct irq_cfg *cfg;

    if (skip_ioapic_setup == 1)
        return;

    for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
        for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
            irq_entry = find_irq_entry(ioapic, pin, mp_INT);
            if (irq_entry == -1)
                continue;
            irq = pin_2_irq(irq_entry, ioapic, pin);
            cfg = irq_cfg(irq);
            BUG_ON(cpus_empty(cfg->domain));
            set_ioapic_affinity_irq(irq, cfg->domain);
        }

    }
}
#endif

/*
 * EISA Edge/Level control register, ELCR
 */
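/* (background: the ELCR is a pair of 8-bit registers at I/O ports 0x4d0
 * for IRQs 0-7 and 0x4d1 for IRQs 8-15, one bit per IRQ, 1 = level and
 * 0 = edge, hence the port arithmetic below) */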
static int EISA_ELCR(unsigned int irq)
{
    if (platform_legacy_irq(irq)) {
        unsigned int port = 0x4d0 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
    }
    apic_printk(APIC_VERBOSE, KERN_INFO
                "Broken MPtable reports ISA irq %d\n", irq);
    return 0;
}

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)    (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
#define default_EISA_polarity(idx)	(0)

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	(0)

/* NEC98 interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_NEC98_trigger(idx)     (0)
#define default_NEC98_polarity(idx)    (0)

static int __init MPBIOS_polarity(int idx)
{
    int bus = mp_irqs[idx].mpc_srcbus;
    int polarity;

    /*
     * Determine IRQ line polarity (high active or low active):
     */
    switch (mp_irqs[idx].mpc_irqflag & 3)
    {
    case 0: /* conforms, ie. bus-type dependent polarity */
    {
        switch (mp_bus_id_to_type[bus])
        {
        case MP_BUS_ISA: /* ISA pin */
        {
            polarity = default_ISA_polarity(idx);
            break;
        }
        case MP_BUS_EISA: /* EISA pin */
        {
            polarity = default_EISA_polarity(idx);
            break;
        }
        case MP_BUS_PCI: /* PCI pin */
        {
            polarity = default_PCI_polarity(idx);
            break;
        }
        case MP_BUS_MCA: /* MCA pin */
        {
            polarity = default_MCA_polarity(idx);
            break;
        }
        case MP_BUS_NEC98: /* NEC 98 pin */
        {
            polarity = default_NEC98_polarity(idx);
            break;
        }
        default:
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            polarity = 1;
            break;
        }
        }
        break;
    }
    case 1: /* high active */
    {
        polarity = 0;
        break;
    }
    case 2: /* reserved */
    {
        printk(KERN_WARNING "broken BIOS!!\n");
        polarity = 1;
        break;
    }
    case 3: /* low active */
    {
        polarity = 1;
        break;
    }
    default: /* invalid */
    {
        printk(KERN_WARNING "broken BIOS!!\n");
        polarity = 1;
        break;
    }
    }
    return polarity;
}

static int MPBIOS_trigger(int idx)
{
    int bus = mp_irqs[idx].mpc_srcbus;
    int trigger;

    /*
     * Determine IRQ trigger mode (edge or level sensitive):
     */
    switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
    {
    case 0: /* conforms, ie. bus-type dependent */
    {
        switch (mp_bus_id_to_type[bus])
        {
        case MP_BUS_ISA: /* ISA pin */
        {
            trigger = default_ISA_trigger(idx);
            break;
        }
        case MP_BUS_EISA: /* EISA pin */
        {
            trigger = default_EISA_trigger(idx);
            break;
        }
        case MP_BUS_PCI: /* PCI pin */
        {
            trigger = default_PCI_trigger(idx);
            break;
        }
        case MP_BUS_MCA: /* MCA pin */
        {
            trigger = default_MCA_trigger(idx);
            break;
        }
        case MP_BUS_NEC98: /* NEC 98 pin */
        {
            trigger = default_NEC98_trigger(idx);
            break;
        }
        default:
        {
            printk(KERN_WARNING "broken BIOS!!\n");
            trigger = 1;
            break;
        }
        }
        break;
    }
    case 1: /* edge */
    {
        trigger = 0;
        break;
    }
    case 2: /* reserved */
    {
        printk(KERN_WARNING "broken BIOS!!\n");
        trigger = 1;
        break;
    }
    case 3: /* level */
    {
        trigger = 1;
        break;
    }
    default: /* invalid */
    {
        printk(KERN_WARNING "broken BIOS!!\n");
        trigger = 0;
        break;
    }
    }
    return trigger;
}

static inline int irq_polarity(int idx)
{
    return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
    return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
    int irq, i;
    int bus = mp_irqs[idx].mpc_srcbus;

    /*
     * Debugging check, we are in big trouble if this message pops up!
     */
    if (mp_irqs[idx].mpc_dstirq != pin)
        printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

    switch (mp_bus_id_to_type[bus])
    {
    case MP_BUS_ISA: /* ISA pin */
    case MP_BUS_EISA:
    case MP_BUS_MCA:
    case MP_BUS_NEC98:
    {
        irq = mp_irqs[idx].mpc_srcbusirq;
        break;
    }
    case MP_BUS_PCI: /* PCI pin */
    {
        /*
         * PCI IRQs are mapped in order
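         * (illustration: assuming two 24-pin IO-APICs, pin 5 of the
         * second one yields IRQ 24 + 5 = 29; the actual counts come
         * from nr_ioapic_registers[])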
         */
        i = irq = 0;
        while (i < apic)
            irq += nr_ioapic_registers[i++];
        irq += pin;

        /*
         * For MPS mode, so far only needed by ES7000 platform
         */
        if (ioapic_renumber_irq)
            irq = ioapic_renumber_irq(apic, irq);

        break;
    }
    default:
    {
        printk(KERN_ERR "unknown bus type %d.\n",bus);
        irq = 0;
        break;
    }
    }

    return irq;
}

static inline int IO_APIC_irq_trigger(int irq)
{
    int apic, idx, pin;

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            idx = find_irq_entry(apic,pin,mp_INT);
            if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
                return irq_trigger(idx);
        }
    }
    /*
     * nonexistent IRQs default to edge triggering
     */
    return 0;
}

static hw_irq_controller ioapic_level_type;
static hw_irq_controller ioapic_edge_type;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1

#define SET_DEST(x, y, value) \
    do { if ( x2apic_enabled ) x = value; else y = value; } while(0)
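/* (x2APIC mode routes the destination through the 32-bit dest32 field of
 * the route entry; otherwise the 8-bit logical/physical destination field
 * is written) */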

static inline void ioapic_register_intr(int irq, unsigned long trigger)
{
    if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
        trigger == IOAPIC_LEVEL)
        irq_desc[irq].handler = &ioapic_level_type;
    else
        irq_desc[irq].handler = &ioapic_edge_type;
}

static void __init setup_IO_APIC_irqs(void)
{
    struct IO_APIC_route_entry entry;
    int apic, pin, idx, irq, first_notcon = 1, vector;
    unsigned long flags;
    struct irq_cfg *cfg;

    apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

            /*
             * add it to the IO-APIC irq-routing table:
             */
            memset(&entry,0,sizeof(entry));

            entry.delivery_mode = INT_DELIVERY_MODE;
            entry.dest_mode = INT_DEST_MODE;
            entry.mask = 0;                /* enable IRQ */

            idx = find_irq_entry(apic,pin,mp_INT);
            if (idx == -1) {
                if (first_notcon) {
                    apic_printk(APIC_VERBOSE, KERN_DEBUG
                                " IO-APIC (apicid-pin) %d-%d",
                                mp_ioapics[apic].mpc_apicid,
                                pin);
                    first_notcon = 0;
                } else
                    apic_printk(APIC_VERBOSE, ", %d-%d",
                                mp_ioapics[apic].mpc_apicid, pin);
                continue;
            }

            entry.trigger = irq_trigger(idx);
            entry.polarity = irq_polarity(idx);

            if (irq_trigger(idx)) {
                entry.trigger = 1;
                entry.mask = 1;
            }

            irq = pin_2_irq(idx, apic, pin);
            /*
             * skip adding the timer int on secondary nodes, which causes
             * a small but painful rift in the time-space continuum
             */
            if (multi_timer_check(apic, irq))
                continue;
            else
                add_pin_to_irq(irq, apic, pin);

            if (!apic && !IO_APIC_IRQ(irq))
                continue;

            if (IO_APIC_IRQ(irq)) {
                vector = assign_irq_vector(irq);
                BUG_ON(vector < 0);
                entry.vector = vector;
                ioapic_register_intr(irq, IOAPIC_AUTO);

                if (!apic && platform_legacy_irq(irq))
                    disable_8259A_irq(irq);
            }
            cfg = irq_cfg(irq);
            SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
                cpu_mask_to_apicid(cfg->domain));
            spin_lock_irqsave(&ioapic_lock, flags);
            io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
            io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
            set_native_irq_info(irq, TARGET_CPUS);
            spin_unlock_irqrestore(&ioapic_lock, flags);
	}
    }

    if (!first_notcon)
        apic_printk(APIC_VERBOSE, " not connected.\n");
}

/*
 * Set up the 8259A-master output pin:
 */
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
{
    struct IO_APIC_route_entry entry;
    unsigned long flags;

    memset(&entry,0,sizeof(entry));

    disable_8259A_irq(0);

    /* mask LVT0 */
    apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);

    /*
     * We use logical delivery to get the timer IRQ
     * to the first CPU.
     */
    entry.dest_mode = INT_DEST_MODE;
    entry.mask = 0;					/* unmask IRQ now */
    SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
        cpu_mask_to_apicid(TARGET_CPUS));
    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.polarity = 0;
    entry.trigger = 0;
    entry.vector = vector;

    /*
     * The timer IRQ doesn't have to know that behind the
     * scene we have a 8259A-master in AEOI mode ...
     */
    irq_desc[0].handler = &ioapic_edge_type;

    /*
     * Add it to the IO-APIC irq-routing table:
     */
    spin_lock_irqsave(&ioapic_lock, flags);
    io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
    io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
    spin_unlock_irqrestore(&ioapic_lock, flags);

    enable_8259A_irq(0);
}

static inline void UNEXPECTED_IO_APIC(void)
{
}

static void /*__init*/ __print_IO_APIC(void)
{
    int apic, i;
    union IO_APIC_reg_00 reg_00;
    union IO_APIC_reg_01 reg_01;
    union IO_APIC_reg_02 reg_02;
    union IO_APIC_reg_03 reg_03;
    unsigned long flags;

    printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
    for (i = 0; i < nr_ioapics; i++)
        printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
               mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);

    /*
     * We are a bit conservative about what we expect.  We have to
     * know about every hardware change ASAP.
     */
    printk(KERN_INFO "testing the IO APIC.......................\n");

    for (apic = 0; apic < nr_ioapics; apic++) {

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(apic, 0);
	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x10)
            reg_02.raw = io_apic_read(apic, 2);
	if (reg_01.bits.version >= 0x20)
            reg_03.raw = io_apic_read(apic, 3);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
	if (reg_00.bits.ID >= get_physical_broadcast())
            UNEXPECTED_IO_APIC();
	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
            UNEXPECTED_IO_APIC();

	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
		(reg_01.bits.entries != 0x2E) &&
		(reg_01.bits.entries != 0x3F)
            )
            UNEXPECTED_IO_APIC();

	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
            )
            UNEXPECTED_IO_APIC();
	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
            UNEXPECTED_IO_APIC();

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
            printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
            if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
                UNEXPECTED_IO_APIC();
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
            printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
            if (reg_03.bits.__reserved_1)
                UNEXPECTED_IO_APIC();
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
               " Stat Dest Deli Vect:   \n");

	for (i = 0; i <= reg_01.bits.entries; i++) {
            struct IO_APIC_route_entry entry;

            spin_lock_irqsave(&ioapic_lock, flags);
            *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
            *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
            spin_unlock_irqrestore(&ioapic_lock, flags);

            printk(KERN_DEBUG " %02x %03X %02X  ",
                   i,
                   entry.dest.logical.logical_dest,
                   entry.dest.physical.physical_dest
		);

            printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
                   entry.mask,
                   entry.trigger,
                   entry.irr,
                   entry.polarity,
                   entry.delivery_status,
                   entry.dest_mode,
                   entry.delivery_mode,
                   entry.vector
		);
	}
    }
    printk(KERN_INFO "Using vector-based indexing\n");
    printk(KERN_DEBUG "IRQ to pin mappings:\n");
    for (i = 0; i < nr_irqs_gsi; i++) {
        struct irq_pin_list *entry = irq_2_pin + i;
        if (entry->pin < 0)
            continue;
        printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
        for (;;) {
            printk("-> %d:%d", entry->apic, entry->pin);
            if (!entry->next)
                break;
            entry = irq_2_pin + entry->next;
        }
        printk("\n");
    }

    printk(KERN_INFO ".................................... done.\n");

    return;
}

void print_IO_APIC(void)
{
    if (apic_verbosity != APIC_QUIET)
        __print_IO_APIC();
}

static void _print_IO_APIC_keyhandler(unsigned char key)
{
    __print_IO_APIC();
}
static struct keyhandler print_IO_APIC_keyhandler = {
    .diagnostic = 1,
    .u.fn = _print_IO_APIC_keyhandler,
    .desc = "print ioapic info"
};

static void __init enable_IO_APIC(void)
{
    int i8259_apic, i8259_pin;
    int i, apic;
    unsigned long flags;

    /* Initialise dynamic irq_2_pin free list. */
    irq_2_pin = xmalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
    BUG_ON(irq_2_pin == NULL);
    memset(irq_2_pin, 0, PIN_MAP_SIZE * sizeof(*irq_2_pin));

    for (i = 0; i < PIN_MAP_SIZE; i++)
        irq_2_pin[i].pin = -1;
    for (i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++)
        irq_2_pin[i].next = i + 1;

    for (apic = 0; apic < nr_ioapics; apic++) {
        int pin;
        /* See if any of the pins is in ExtINT mode */
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            struct IO_APIC_route_entry entry;
            spin_lock_irqsave(&ioapic_lock, flags);
            *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
            *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
            spin_unlock_irqrestore(&ioapic_lock, flags);

            /* If the interrupt line is enabled and in ExtINT mode,
             * we have found the pin where the i8259 is connected.
             */
            if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                ioapic_i8259.apic = apic;
                ioapic_i8259.pin  = pin;
                goto found_i8259;
            }
        }
    }
 found_i8259:
    /* Look to see if the MP table has reported the ExtINT. */
    /* If we could not find the appropriate pin by looking at the ioapic,
     * the i8259 probably is not connected to the ioapic, but give the
     * mptable a chance anyway.
     */
    i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
    i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
    /* Trust the MP table if nothing is set up in the hardware */
    if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
        printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
        ioapic_i8259.pin  = i8259_pin;
        ioapic_i8259.apic = i8259_apic;
    }
    /* Complain if the MP table and the hardware disagree */
    if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
        (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
    {
        printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
    }

    /*
     * Do not trust the IO-APIC being empty at bootup
     */
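    /* clear_IO_APIC() masks every redirection entry, so no stale BIOS routing can fire. */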
    clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
    /*
     * Clear the IO-APIC before rebooting:
     */
    clear_IO_APIC();

    /*
     * If the i8259 is routed through an IOAPIC
     * Put that IOAPIC in virtual wire mode
     * so legacy interrupts can be delivered.
     */
    if (ioapic_i8259.pin != -1) {
        struct IO_APIC_route_entry entry;
        unsigned long flags;

        memset(&entry, 0, sizeof(entry));
        entry.mask            = 0; /* Enabled */
        entry.trigger         = 0; /* Edge */
        entry.irr             = 0;
        entry.polarity        = 0; /* High */
        entry.delivery_status = 0;
        entry.dest_mode       = 0; /* Physical */
        entry.delivery_mode   = dest_ExtINT; /* ExtInt */
        entry.vector          = 0;
        SET_DEST(entry.dest.dest32, entry.dest.physical.physical_dest,
            get_apic_id());

        /*
         * Add it to the IO-APIC irq-routing table:
         */
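        /*
         * Write the high dword (destination) before the low dword: the
         * mask bit lives in the low dword, so the entry must never be
         * unmasked while it still holds a stale destination.
         */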
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
                      *(((int *)&entry)+1));
        io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
                      *(((int *)&entry)+0));
        spin_unlock_irqrestore(&ioapic_lock, flags);
    }
    disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

#ifndef CONFIG_X86_NUMAQ
static void __init setup_ioapic_ids_from_mpc(void)
{
    union IO_APIC_reg_00 reg_00;
    physid_mask_t phys_id_present_map;
    int apic;
    int i;
    unsigned char old_id;
    unsigned long flags;

    /*
     * Don't check I/O APIC IDs for xAPIC systems. They have
     * no meaning without the serial APIC bus.
     */
    if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
        || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
        return;

    /*
     * This is broken; anything with a real cpu count has to
     * circumvent this idiocy regardless.
     */
    phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

    /*
     * Set the IOAPIC ID to the value stored in the MPC table.
     */
    for (apic = 0; apic < nr_ioapics; apic++) {

        /* Read the register 0 value */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);
		
        old_id = mp_ioapics[apic].mpc_apicid;

        if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                   apic, mp_ioapics[apic].mpc_apicid);
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   reg_00.bits.ID);
            mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
        }

        /*
         * Sanity check, is the ID really free? Every APIC in a
         * system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
        if (check_apicid_used(phys_id_present_map,
                              mp_ioapics[apic].mpc_apicid)) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                   apic, mp_ioapics[apic].mpc_apicid);
            for (i = 0; i < get_physical_broadcast(); i++)
                if (!physid_isset(i, phys_id_present_map))
                    break;
            if (i >= get_physical_broadcast())
                panic("Max APIC ID exceeded!\n");
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   i);
            physid_set(i, phys_id_present_map);
            mp_ioapics[apic].mpc_apicid = i;
        } else {
            physid_mask_t tmp;
            tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
            apic_printk(APIC_VERBOSE, "Setting %d in the "
                        "phys_id_present_map\n",
                        mp_ioapics[apic].mpc_apicid);
            physids_or(phys_id_present_map, phys_id_present_map, tmp);
        }

        /*
         * We need to adjust the IRQ routing table
         * if the ID changed.
         */
        if (old_id != mp_ioapics[apic].mpc_apicid)
            for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mpc_dstapic == old_id)
                    mp_irqs[i].mpc_dstapic
                        = mp_ioapics[apic].mpc_apicid;

        /*
         * Read the right value from the MPC table and
         * write it into the ID register.
         */
        apic_printk(APIC_VERBOSE, KERN_INFO
                    "...changing IO-APIC physical APIC ID to %d ...",
                    mp_ioapics[apic].mpc_apicid);

        reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0, reg_00.raw);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /*
         * Sanity check
         */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
            printk("could not set ID!\n");
        else
            apic_printk(APIC_VERBOSE, " ok.\n");
    }
}
#else
static void __init setup_ioapic_ids_from_mpc(void) { }
#endif

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
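/*
 * Called (e.g. from check_timer()) after rerouting the timer, to probe
 * whether ticks are actually being delivered.
 */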
static int __init timer_irq_works(void)
{
    extern unsigned long pit0_ticks;
    unsigned long t1, flags;

    t1 = pit0_ticks;
    mb();

    local_save_flags(flags);
    local_irq_enable();
    /* Let ten ticks pass... */
    mdelay((10 * 1000) / HZ);
    local_irq_restore(flags);

    /*
     * Expect a few ticks at least, to be sure some possible
     * glue logic does not lock up after one or two first
     * ticks in a non-ExtINT mode.  Also the local APIC
     * might have cached one ExtINT interrupt.  Finally, at
     * least one tick may be lost due to delays.
     */
    mb();
    if (pit0_ticks - t1 > 4)
        return 1;

    return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there is an unspecified
 * number of pending IRQ events left unhandled. These cases are very
 * rare, so we 'resend' these IRQs via IPIs to the same CPU. It's much
 * better to do it this way, as we then do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge-triggered handlers need to resend any interrupt
 * that was delayed, but this is now handled in the
 * device-independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_edge_ioapic_irq(unsigned int irq)
{
    int was_pending = 0;
    unsigned long flags;

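    /*
     * For IRQs also wired through the 8259A, mask the PIC line and
     * record whether an edge is already latched there, so the caller
     * knows to replay it.
     */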
    spin_lock_irqsave(&ioapic_lock, flags);
    if (platform_legacy_irq(irq)) {
        disable_8259A_irq(irq);
        if (i8259A_irq_pending(irq))
            was_pending = 1;
    }
    __unmask_IO_APIC_irq(irq);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return was_pending;
}

/*
 * Once we have recorded IRQ_PENDING already, we can mask the
 * interrupt for real. This prevents IRQ storms from unhandled
 * devices.
 */
static void ack_edge_ioapic_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);
    
    irq_complete_move(&desc);
    move_native_irq(irq);

    if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
        == (IRQ_PENDING | IRQ_DISABLED))
        mask_IO_APIC_irq(irq);
    ack_APIC_irq();
}

/*
 * Level triggered interrupts can just be masked,
 * and shutting down and starting up the interrupt
 * is the same as enabling and disabling them -- except
 * that startup needs to return a "was pending" value.
 *
 * Level triggered interrupts are special because we
 * do not touch any IO-APIC register while handling
 * them. We ack the APIC in the end-IRQ handler, not
 * in the start-IRQ handler. Protection against reentrance
 * from the same interrupt is still provided, both by the
 * generic IRQ layer and by the fact that an unacked local
 * APIC does not accept IRQs.
 */
static unsigned int startup_level_ioapic_irq(unsigned int irq)
{
    unmask_IO_APIC_irq(irq);

    return 0; /* don't check for pending */
}

int __read_mostly ioapic_ack_new = 1;
static void setup_ioapic_ack(char *s)
{
    if ( !strcmp(s, "old") )
        ioapic_ack_new = 0;
    else if ( !strcmp(s, "new") )
        ioapic_ack_new = 1;
    else
        printk("Unknown ioapic_ack value specified: '%s'\n", s);
}
custom_param("ioapic_ack", setup_ioapic_ack);
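/* Selected via the hypervisor command line, e.g. "ioapic_ack=old". */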

static bool_t io_apic_level_ack_pending(unsigned int irq)
{
    struct irq_pin_list *entry;
    unsigned long flags;

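    /*
     * Walk every pin feeding this IRQ.  The Remote IRR bit (bit 14 of
     * the low RTE dword) stays set from delivery of a level-triggered
     * interrupt until the corresponding EOI arrives.
     */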
    spin_lock_irqsave(&ioapic_lock, flags);
    entry = &irq_2_pin[irq];
    for (;;) {
        unsigned int reg;
        int pin;

        if (!entry)
            break;

        pin = entry->pin;
        if (pin == -1)
            break; /* unpopulated entry; 'continue' here would spin forever */
        reg = io_apic_read(entry->apic, 0x10 + pin*2);
        /* Is the remote IRR bit set? */
        if (reg & IO_APIC_REDIR_REMOTE_IRR) {
            spin_unlock_irqrestore(&ioapic_lock, flags);
            return 1;
        }
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}

static void mask_and_ack_level_ioapic_irq(unsigned int irq)
{
    unsigned long v;
    int i;
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);

    if ( ioapic_ack_new )
        return;

    if ( !directed_eoi_enabled )
        mask_IO_APIC_irq(irq);

/*
 * It appears there is an erratum which affects at least version 0x11
 * of I/O APIC (that's the 82093AA and cores integrated into various
 * chipsets).  Under certain conditions a level-triggered interrupt is
 * erroneously delivered as an edge-triggered one but the respective IRR
 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
 * message but it will never arrive and further interrupts are blocked
 * from the source.  The exact reason is so far unknown, but the
 * phenomenon was observed when two consecutive interrupt requests
 * from a given source get delivered to the same CPU and the source is
 * temporarily disabled in between.
 *
 * A workaround is to simulate an EOI message manually.  We achieve it
 * by setting the trigger mode to edge and then to level when the edge
 * trigger mode gets detected in the TMR of a local APIC for a
 * level-triggered interrupt.  We mask the source for the time of the
 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 * The idea is from Manfred Spraul.  --macro
 */
    i = IO_APIC_VECTOR(irq);

    v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
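    /*
     * APIC_TMR is a 256-bit register file: one bit per vector, 32
     * vectors per 32-bit word, words 16 bytes apart - hence the
     * ((i & ~0x1f) >> 1) byte offset.
     */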

    ack_APIC_irq();
    
    if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&