/* testhal/STM32/STM32F3xx/ADC/mcuconf.h */
/*
    ChibiOS - Copyright (C) 2006..2018 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef MCUCONF_H
#define MCUCONF_H

/*
 * STM32F3xx drivers configuration.
 * The following settings override the default settings present in
 * the various device driver implementation headers.
 * Note that the settings for each driver only have effect if the whole
 * driver is enabled in halconf.h.
 *
 * IRQ priorities:
 * 15...0       Lowest...Highest.
 *
 * DMA priorities:
 * 0...3        Lowest...Highest.
 */

#define STM32F3xx_MCUCONF

/*
 * HAL driver system settings.
 */
#define STM32_NO_INIT                       FALSE
#define STM32_PVD_ENABLE                    FALSE
#define STM32_PLS                           STM32_PLS_LEV0
#define STM32_HSI_ENABLED                   TRUE
#define STM32_LSI_ENABLED                   TRUE
#define STM32_HSE_ENABLED                   TRUE
#define STM32_LSE_ENABLED                   FALSE
#define STM32_SW                            STM32_SW_PLL
#define STM32_PLLSRC                        STM32_PLLSRC_HSE
#define STM32_PREDIV_VALUE                  1
#define STM32_PLLMUL_VALUE                  9
#define STM32_HPRE                          STM32_HPRE_DIV1
#define STM32_PPRE1                         STM32_PPRE1_DIV2
#define STM32_PPRE2                         STM32_PPRE2_DIV2
#define STM32_MCOSEL                        STM32_MCOSEL_NOCLOCK
#define STM32_ADC12PRES                     STM32_ADC12PRES_DIV1
#define STM32_ADC34PRES                     STM32_ADC34PRES_DIV1
#define STM32_USART1SW                      STM32_USART1SW_PCLK
#define STM32_USART2SW                      STM32_USART2SW_PCLK
#define STM32_USART3SW                      STM32_USART3SW_PCLK
#define STM32_UART4SW                       STM32_UART4SW_PCLK
#define STM32_UART5SW                       STM32_UART5SW_PCLK
#define STM32_I2C1SW                        STM32_I2C1SW_SYSCLK
#define STM32_I2C2SW                        STM32_I2C2SW_SYSCLK
#define STM32_TIM1SW                        STM32_TIM1SW_PCLK2
#define STM32_TIM8SW                        STM32_TIM8SW_PCLK2
#define STM32_RTCSEL                        STM32_RTCSEL_LSI
#define STM32_USB_CLOCK_REQUIRED            TRUE
#define STM32_USBPRE                        STM32_USBPRE_DIV1P5
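
/*
 * Resulting clock tree (a sketch, assuming the usual 8 MHz HSE crystal;
 * adjust the figures if your board uses a different crystal):
 *   PLL    = HSE / PREDIV * PLLMUL = 8 MHz / 1 * 9 = 72 MHz
 *   SYSCLK = PLL                   = 72 MHz  (STM32_SW_PLL)
 *   HCLK   = SYSCLK / 1            = 72 MHz  (STM32_HPRE_DIV1)
 *   PCLK1  = HCLK / 2              = 36 MHz  (STM32_PPRE1_DIV2)
 *   PCLK2  = HCLK / 2              = 36 MHz  (STM32_PPRE2_DIV2)
 *   USBCLK = PLL / 1.5             = 48 MHz  (STM32_USBPRE_DIV1P5)
 */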

/*
 * IRQ system settings.
 */
#define STM32_IRQ_EXTI0_PRIORITY            6
#define STM32_IRQ_EXTI1_PRIORITY            6
#define STM32_IRQ_EXTI2_PRIORITY            6
#define STM32_IRQ_EXTI3_PRIORITY            6
#define STM32_IRQ_EXTI4_PRIORITY            6
#define STM32_IRQ_EXTI5_9_PRIORITY          6
#define STM32_IRQ_EXTI10_15_PRIORITY        6
#define STM32_IRQ_EXTI16_PRIORITY           6
#define STM32_IRQ_EXTI17_PRIORITY           6
#define STM32_IRQ_EXTI18_PRIORITY           6
#define STM32_IRQ_EXTI19_PRIORITY           6
#define STM32_IRQ_EXTI20_PRIORITY           6
#define STM32_IRQ_EXTI21_22_29_PRIORITY     6
#define STM32_IRQ_EXTI30_32_PRIORITY        6
#define STM32_IRQ_EXTI33_PRIORITY           6

/*
 * ADC driver system settings.
 */
#define STM32_ADC_DUAL_MODE                 FALSE
#define STM32_ADC_COMPACT_SAMPLES           FALSE
#define STM32_ADC_USE_ADC1                  TRUE
#define STM32_ADC_USE_ADC2                  TRUE
#define STM32_ADC_USE_ADC3                  TRUE
#define STM32_ADC_USE_ADC4                  TRUE
#define STM32_ADC_ADC1_DMA_STREAM           STM32_DMA_STREAM_ID(1, 1)
#define STM32_ADC_ADC2_DMA_STREAM           STM32_DMA_STREAM_ID(2, 1)
#define STM32_ADC_ADC3_DMA_STREAM           STM32_DMA_STREAM_ID(2, 5)
#define STM32_ADC_ADC4_DMA_STREAM           STM32_DMA_STREAM_ID(2, 2)
#define STM32_ADC_ADC1_DMA_PRIORITY         2
#define STM32_ADC_ADC2_DMA_PRIORITY         2
#define STM32_ADC_ADC3_DMA_PRIORITY         2
#define STM32_ADC_ADC4_DMA_PRIORITY         2
#define STM32_ADC_ADC12_IRQ_PRIORITY        5
#define STM32_ADC_ADC3_IRQ_PRIORITY         5
#define STM32_ADC_ADC4_IRQ_PRIORITY         5
#define STM32_ADC_ADC1_DMA_IRQ_PRIORITY     5
#define STM32_ADC_ADC2_DMA_IRQ_PRIORITY     5
#define STM32_ADC_ADC3_DMA_IRQ_PRIORITY     5
#define STM32_ADC_ADC4_DMA_IRQ_PRIORITY     5
#define STM32_ADC_ADC12_CLOCK_MODE          ADC_CCR_CKMODE_AHB_DIV1
#define STM32_ADC_ADC34_CLOCK_MODE          ADC_CCR_CKMODE_AHB_DIV1
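
/*
 * Note (informal): ADC_CCR_CKMODE_AHB_DIV1 clocks both ADC pairs
 * synchronously from the AHB clock, i.e. 72 MHz with the clock tree
 * sketched above.  The STM32_DMA_STREAM_ID(controller, channel) macros
 * above bind each ADC to a DMA unit and channel, e.g. ADC1 to DMA1
 * channel 1.
 */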

/*
 * CAN driver system settings.
 */
#define STM32_CAN_USE_CAN1                  FALSE
#define STM32_CAN_CAN1_IRQ_PRIORITY         11

/*
 * DAC driver system settings.
 */
#define STM32_DAC_DUAL_MODE                 FALSE
#define STM32_DAC_USE_DAC1_CH1              TRUE
#define STM32_DAC_USE_DAC1_CH2              TRUE
#define STM32_DAC_DAC1_CH1_IRQ_PRIORITY     10
#define STM32_DAC_DAC1_CH2_IRQ_PRIORITY     10
#define STM32_DAC_DAC1_CH1_DMA_PRIORITY     2
#define STM32_DAC_DAC1_CH2_DMA_PRIORITY     2

/*
 * GPT driver system settings.
 */
#define STM32_GPT_USE_TIM1                  FALSE
#define STM32_GPT_USE_TIM2                  FALSE
#define STM32_GPT_USE_TIM3                  FALSE
#define STM32_GPT_USE_TIM4                  FALSE
#define STM32_GPT_USE_TIM6                  FALSE
#define STM32_GPT_USE_TIM7                  FALSE
#define STM32_GPT_USE_TIM8                  FALSE
#define STM32_GPT_TIM1_IRQ_PRIORITY         7
#define STM32_GPT_TIM2_IRQ_PRIORITY         7
#define STM32_GPT_TIM3_IRQ_PRIORITY         7
#define STM32_GPT_TIM4_IRQ_PRIORITY         7
#define STM32_GPT_TIM6_IRQ_PRIORITY         7
#define STM32_GPT_TIM7_IRQ_PRIORITY         7
#define STM32_GPT_TIM8_IRQ_PRIORITY         7

/*
 * I2C driver system settings.
 */
#define STM32_I2C_USE_I2C1                  FALSE
#define STM32_I2C_USE_I2C2                  FALSE
#define STM32_I2C_BUSY_TIMEOUT              50
#define STM32_I2C_I2C1_IRQ_PRIORITY         10
#define STM32_I2C_I2C2_IRQ_PRIORITY         10
#define STM32_I2C_USE_DMA                   TRUE
#define STM32_I2C_I2C1_DMA_PRIORITY         1
#define STM32_I2C_I2C2_DMA_PRIORITY         1
#define STM32_I2C_DMA_ERROR_HOOK(i2cp)      osalSysHalt("DMA failure")

/*
 * ICU driver system settings.
 */
#define STM32_ICU_USE_TIM1                  FALSE
#define STM32_ICU_USE_TIM2                  FALSE
#define STM32_ICU_USE_TIM3                  FALSE
#define STM32_ICU_USE_TIM4                  FALSE
#define STM32_ICU_USE_TIM8                  FALSE
#define STM32_ICU_TIM1_IRQ_PRIORITY         7
#define STM32_ICU_TIM2_IRQ_PRIORITY         7
#define STM32_ICU_TIM3_IRQ_PRIORITY         7
#define STM32_ICU_TIM4_IRQ_PRIORITY         7
#define STM32_ICU_TIM8_IRQ_PRIORITY         7

/*
 * PWM driver system settings.
 */
#define STM32_PWM_USE_ADVANCED              FALSE
#define STM32_PWM_USE_TIM1                  FALSE
#define STM32_PWM_USE_TIM2                  FALSE
#define STM32_PWM_USE_TIM3                  FALSE
#define STM32_PWM_USE_TIM4                  FALSE
#define STM32_PWM_USE_TIM8                  FALSE
#define STM32_PWM_TIM1_IRQ_PRIORITY         7
#define STM32_PWM_TIM2_IRQ_PRIORITY         7
#define STM32_PWM_TIM3_IRQ_PRIORITY         7
#define STM32_PWM_TIM4_IRQ_PRIORITY         7
#define STM32_PWM_TIM8_IRQ_PRIORITY         7

/*
 * SERIAL driver system settings.
 */
#define STM32_SERIAL_USE_USART1             FALSE
#define STM32_SERIAL_USE_USART2             FALSE
#define STM32_SERIAL_USE_USART3             FALSE
#define STM32_SERIAL_USE_UART4              FALSE
#define STM32_SERIAL_USE_UART5              FALSE
#define STM32_SERIAL_USART1_PRIORITY        12
#define STM32_SERIAL_USART2_PRIORITY        12
#define STM32_SERIAL_USART3_PRIORITY        12
#define STM32_SERIAL_UART4_PRIORITY         12
#define STM32_SERIAL_UART5_PRIORITY         12

/*
 * SPI driver system settings.
 */
#define STM32_SPI_USE_SPI1                  FALSE
#define STM32_SPI_USE_SPI2                  FALSE
#define STM32_SPI_USE_SPI3                  FALSE
#define STM32_SPI_SPI1_DMA_PRIORITY         1
#define STM32_SPI_SPI2_DMA_PRIORITY         1
#define STM32_SPI_SPI3_DMA_PRIORITY         1
#define STM32_SPI_SPI1_IRQ_PRIORITY         10
#define STM32_SPI_SPI2_IRQ_PRIORITY         10
#define STM32_SPI_SPI3_IRQ_PRIORITY         10
#define STM32_SPI_DMA_ERROR_HOOK(spip)      osalSysHalt("DMA failure")

/*
 * ST driver system settings.
 */
#define STM32_ST_IRQ_PRIORITY               8
#define STM32_ST_USE_TIMER                  2

/*
 * UART driver system settings.
 */
#define STM32_UART_USE_USART1               FALSE
#define STM32_UART_USE_USART2               FALSE
#define STM32_UART_USE_USART3               FALSE
#define STM32_UART_USART1_IRQ_PRIORITY      12
#define STM32_UART_USART2_IRQ_PRIORITY      12
#define STM32_UART_USART3_IRQ_PRIORITY      12
#define STM32_UART_USART1_DMA_PRIORITY      0
#define STM32_UART_USART2_DMA_PRIORITY      0
#define STM32_UART_USART3_DMA_PRIORITY      0
#define STM32_UART_DMA_ERROR_HOOK(uartp)    osalSysHalt("DMA failure")

/*
 * USB driver system settings.
 */
#define STM32_USB_USE_USB1                  FALSE
#define STM32_USB_LOW_POWER_ON_SUSPEND      FALSE
#define STM32_USB_USB1_HP_IRQ_PRIORITY      13
#define STM32_USB_USB1_LP_IRQ_PRIORITY      14

/*
 * WDG driver system settings.
 */
#define STM32_WDG_USE_IWDG                  FALSE

#endif /* MCUCONF_H */
/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Updated for latest kernel
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger Added INIT backtrace support.
 * 02/03/25 M. Domsch	GUID cleanups
 *
 * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU
 *			error flag, set SAL default return values, changed
 *			error record structure to linked list, added init call
 *			to sal_get_state_info_size().
 *
 * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
 *                      platform errors, completed code for logging of
 *                      corrected & uncorrected machine check errors, and
 *                      updated for conformance with Nov. 2000 revision of the
 *                      SAL 3.0 spec.
 * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *                           added min save state dump, added INIT handler.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *            smp_call_function() must not be called from interrupt context (can
 *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *            Avoid deadlock when using printk() for MCA and INIT records.
 *            Delete all record printing code, moved to salinfo_decode in user space.
 *            Mark variables and functions static where possible.
 *            Delete dead variables and functions.
 *            Reorder to remove the need for forward declarations and to consolidate
 *            related code.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>

#ifdef XEN
#include <xen/symbols.h>
#include <xen/mm.h>
#endif

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

/* Used by mca_asm.S */
#ifndef XEN
ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
#else
ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state[NR_CPUS];
DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
#endif
ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
u64				ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_monarch_init_handler (void);
extern void			ia64_slave_init_handler (void);

static ia64_mc_info_t		ia64_mc_info;

#ifndef XEN
#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
#endif /* !XEN */

static int mca_init;

#ifndef XEN
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    (1 - ia64_state_log[it].isl_index)
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
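
/*
 * The double-buffer ping-pong, illustrated (isl_index is always 0 or 1):
 *
 *	isl_index == 0:  NEXT buffer is isl_log[0], CURR buffer is isl_log[1]
 *	IA64_LOG_INDEX_INC(it)   (isl_index becomes 1, isl_count++)
 *	isl_index == 1:  NEXT buffer is isl_log[1], CURR buffer is isl_log[0]
 *
 * A record is read from SAL into the NEXT buffer; the index flip then makes
 * that buffer CURR, leaving the other buffer free for a nested MCA.
 */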

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	int                         s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * platform dependent error handling
 */
#endif /* !XEN */
#ifndef PLATFORM_MCA_HANDLERS
#ifndef XEN

#ifdef CONFIG_ACPI

int cpe_vector = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
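
	/*
	 * Storm throttling, in outline: cpe_history[] holds the jiffies
	 * timestamps of recent CPE interrupts.  If CPE_HISTORY_LENGTH of
	 * them (counting this one) fall within one second (HZ jiffies),
	 * interrupt mode is abandoned below in favour of the poll timer.
	 */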

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
#endif /* !XEN */

static void
show_min_state (pal_min_state_area_t *minstate)
{
	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;

	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
	printk("b1\t\t%016lx ", minstate->pmsa_br1);
	print_symbol("%s\n", minstate->pmsa_br1);

	printk("\nstatic registers r0-r15:\n");
	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
	printk("r12-15 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);

	printk("\nbank 0:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);

	printk("\nbank 1:\n");
	printk("r16-19 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
	printk("r20-23 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
	printk("r24-27 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
	printk("r28-31 %016lx %016lx %016lx %016lx\n",
	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
}

static void
fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
{
	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
	int i;

	/*
	 * First, update the pt-regs and switch-stack structures with the contents stored
	 * in the min-state area:
	 */
	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
		pt->cr_ipsr = ms->pmsa_xpsr;
		pt->cr_iip = ms->pmsa_xip;
		pt->cr_ifs = ms->pmsa_xfs;
	} else {
		pt->cr_ipsr = ms->pmsa_ipsr;
		pt->cr_iip = ms->pmsa_iip;
		pt->cr_ifs = ms->pmsa_ifs;
	}
	pt->ar_rsc = ms->pmsa_rsc;
	pt->pr = ms->pmsa_pr;
	pt->r1 = ms->pmsa_gr[0];
	pt->r2 = ms->pmsa_gr[1];
	pt->r3 = ms->pmsa_gr[2];
	sw->r4 = ms->pmsa_gr[3];
	sw->r5 = ms->pmsa_gr[4];
	sw->r6 = ms->pmsa_gr[5];
	sw->r7 = ms->pmsa_gr[6];
	pt->r8 = ms->pmsa_gr[7];
	pt->r9 = ms->pmsa_gr[8];
	pt->r10 = ms->pmsa_gr[9];
	pt->r11 = ms->pmsa_gr[10];
	pt->r12 = ms->pmsa_gr[11];
	pt->r13 = ms->pmsa_gr[12];
	pt->r14 = ms->pmsa_gr[13];
	pt->r15 = ms->pmsa_gr[14];
	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
	src_banked = ms->pmsa_bank1_gr;
	for (i = 0; i < 16; ++i)
		dst_banked[i] = src_banked[i];
	pt->b0 = ms->pmsa_br0;
	sw->b1 = ms->pmsa_br1;

	/* construct the NaT bits for the pt-regs structure: */
#	define PUT_NAT_BIT(dst, addr)					\
	do {								\
		bit = nat_bits & 1; nat_bits >>= 1;			\
		shift = ((unsigned long) addr >> 3) & 0x3f;		\
		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
	} while (0)

	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
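
	/*
	 * Worked example (hypothetical address): if ms->pmsa_gr[0] sat at
	 * 0x...208, then shift = (0x208 >> 3) & 0x3f = 1, i.e. the NaT bit
	 * describing pmsa_gr[0] lives at bit 1 of pmsa_nat_bits; the
	 * rotate-right by `shift' above moves it down to bit 0 of nat_bits,
	 * so PUT_NAT_BIT can consume one bit per register, in order.
	 */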

	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
	nat_bits >>= 16;	/* skip over bank0 NaT bits */
	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
	PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
}

#ifdef XEN
static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;

static void
save_ksp (struct unw_frame_info *info, void *arg)
{
	current->arch._thread.ksp = (__u64)(info->sw) - 16;
	wmb();
}

/* FIXME */
int try_crashdump(struct pt_regs *a) { return 0; }

#define CPU_FLUSH_RETRY_MAX 5
static void
init_cache_flush (void)
{
	unsigned long flags;
	int i;
	s64 rval = 0;
	u64 vector, progress = 0;

	for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
		local_irq_save(flags);
		rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
		                            0, &progress, &vector);
		local_irq_restore(flags);
		if (rval == 0){
			printk("\nPAL cache flush success\n");
			return;
		}
	}
	printk("\nPAL cache flush failed. status=%ld\n",rval);
}
#endif /* XEN */

static void
init_handler_platform (pal_min_state_area_t *ms,
		       struct pt_regs *pt, struct switch_stack *sw)
{
	struct unw_frame_info info;

	/* if a kernel debugger is available call it here else just dump the registers */

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
	 * generated via the BMC's command-line interface, but since the console is on the
	 * same serial line, the user will need some time to switch out of the BMC before
	 * the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
#ifdef XEN
	fetch_min_state(ms, pt, sw);
	spin_lock(&show_stack_lock);
#endif
	show_min_state(ms);

#ifdef XEN
	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
#else
	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
	fetch_min_state(ms, pt, sw);
#endif
	unw_init_from_interruption(&info, current, pt, sw);
	ia64_do_show_stack(&info, NULL);
#ifdef XEN
	unw_init_running(save_ksp, NULL);
	spin_unlock(&show_stack_lock);
	wmb();
	init_cache_flush();

	if (spin_trylock(&init_dump_lock)) {
#ifdef CONFIG_SMP
		udelay(5*1000000);
#endif
		if (try_crashdump(pt) == 0)
			printk("\nINIT dump complete.  Please reboot now.\n");
	}
	printk("%s: CPU%d init handler done\n",
	       __FUNCTION__, smp_processor_id());
#else /* XEN */
#ifdef CONFIG_SMP
	/* read_trylock() would be handy... */
	if (!tasklist_lock.write_lock)
		read_lock(&tasklist_lock);
#endif
	{
		struct task_struct *g, *t;
		do_each_thread (g, t) {
			if (t == current)
				continue;

			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
	}
#ifdef CONFIG_SMP
	if (!tasklist_lock.write_lock)
		read_unlock(&tasklist_lock);
#endif

	printk("\nINIT dump complete.  Please reboot now.\n");
#endif /* XEN */
	while (1);			/* hang city if no debugger */
}

#ifndef XEN
#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
static void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

#endif /* !XEN */
#endif /* PLATFORM_MCA_HANDLERS */
#ifndef XEN

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Setup the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;
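
	/*
	 * cr.cmcv follows the common ia64 interrupt control register layout
	 * (interrupt vector in bits 7:0, mask bit "m" in bit 16), and
	 * cmcv_reg_t is a bitfield overlay of that 64-bit format.
	 */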

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}

/*
 * ia64_mca_wakeup_ipi_wait
 *
 *	Wait for the inter-cpu interrupt to be sent by the
 *	monarch processor once it is done with handling the
 *	MCA.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_ipi_wait(void)
{
	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
	u64	irr = 0;
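
	/*
	 * The four cr.irr registers hold one pending bit per vector: the top
	 * two bits of the 8-bit vector select the register and the low six
	 * bits select the bit.  For illustration, a hypothetical vector 0xe8
	 * would give irr_num = 0xe8 >> 6 = 3 and irr_bit = 0xe8 & 0x3f =
	 * 0x28, i.e. bit 40 of cr.irr3.
	 */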

	do {
		switch(irr_num) {
		      case 0:
			irr = ia64_getreg(_IA64_REG_CR_IRR0);
			break;
		      case 1:
			irr = ia64_getreg(_IA64_REG_CR_IRR1);
			break;
		      case 2:
			irr = ia64_getreg(_IA64_REG_CR_IRR2);
			break;
		      case 3:
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			break;
		}
		cpu_relax();
	} while (!(irr & (1UL << irr_bit)));
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu
 *	and mark that cpu as out of rendezvous.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the cpus which have previously rendezvoused.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for(cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_int_handler
 *
 *	This is the handler used to put the slave processors into a spinloop
 *	while the monarch processor does the MCA handling; once the monarch
 *	is done, it wakes each slave up.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	/* Mask all interrupts */
	local_irq_save(flags);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	/* Wait for the wakeup IPI from the monarch
	 * This waiting is done by polling on the wakeup-interrupt
	 * vector bit in the processor's IRRs
	 */
	ia64_mca_wakeup_ipi_wait();

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *	ptregs		(Exception frame at the time of the interrupt)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}

/*
 * ia64_return_to_sal_check
 *
 *	This is the function called before going back from the OS_MCA handler
 *	to the OS_MCA dispatch code, which finally returns control to SAL.
 *	Its main purpose is to set up the OS_MCA to SAL return state that
 *	the OS_MCA dispatch code uses just before going back to SAL.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */

static void
ia64_return_to_sal_check(int recover)
{

	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
	 * so that it can be used at the time of os_mca_to_sal_handoff
	 */
	ia64_os_to_sal_handoff_state.imots_sal_gp =
		ia64_sal_to_os_handoff_state.imsto_sal_gp;

	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;

	if (recover)
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
	else
		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;

	/* Default = tell SAL to return to same context */
	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;

	ia64_os_to_sal_handoff_state.imots_new_min_state =
		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;

}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
	= NULL;

int
ia64_reg_MCA_extension(void *fn)
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);
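
/*
 * Usage sketch for the extension hook (my_mca_recover is hypothetical):
 *
 *	static int my_mca_recover(void *rec,
 *				  ia64_mca_sal_to_os_state_t *sal_to_os,
 *				  ia64_mca_os_to_sal_state_t *os_to_sal)
 *	{
 *		return 1;	(nonzero: treat this MCA as corrected)
 *	}
 *
 *	if (ia64_reg_MCA_extension(my_mca_recover))
 *		printk("another MCA extension is already registered\n");
 *	...
 *	ia64_unreg_MCA_extension();
 */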

/*
 * ia64_mca_ucmc_handler
 *
 *	This is the uncorrectable machine check handler, called from the
 *	OS_MCA dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format. This handler code is supposed to run only on the monarch
 *	processor. Once the monarch is done with MCA handling, further
 *	MCA logging is enabled by clearing the logs.
 *	The monarch also has the duty of sending wakeup IPIs to pull the
 *	slave processors out of the rendezvous spinloop.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
void
ia64_mca_ucmc_handler(void)
{
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&ia64_sal_to_os_handoff_state.proc_state_param;
	int recover; 

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* Recover if the only error present in this SAL record is a TLB error */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
	/* ... or if a registered recovery extension claims the error */
	   || (ia64_mca_ucmc_extension 
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			&ia64_sal_to_os_handoff_state,
			&ia64_os_to_sal_handoff_state)); 

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
	}
	/*
	 *  Wakeup all the processors which are spinning in the rendezvous
	 *  loop.
	 */
	ia64_mca_wakeup_all();

	/* Return to SAL */
	ia64_return_to_sal_check(recover);
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_int_caller
 *
 * 	Triggered by sw interrupt from CMC polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 * 	handled
 */
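/*
 * The cascade in outline: the poll timer IPIs the first online cpu; each
 * cpu runs the real handler and then IPIs the next online cpu; the last
 * cpu compares IA64_LOG_COUNT(SAL_INFO_TYPE_CMC) before and after the
 * sweep.  An unchanged count means the storm has passed, so the CMC
 * vector is re-enabled and polling stops; otherwise the poll timer is
 * re-armed.
 */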
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 *  ia64_mca_cpe_int_caller
 *
 * 	Triggered by sw interrupt from CPE polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 * 	handled
 */
#ifdef CONFIG_ACPI
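
/*
 * Poll-interval backoff, in outline: a sweep that finds a record halves
 * poll_time (floored at MIN_CPE_POLL_INTERVAL, 2 minutes); an idle sweep
 * doubles it (capped at MAX_CPE_POLL_INTERVAL, 15 minutes) when the
 * platform lacks a CPEI vector, or re-enables the CPEI interrupt and
 * stops polling when it has one.
 */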

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */
#endif /* !XEN */

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_monarch_init_handler
 *
 * Inputs: pointer to pt_regs where processor info was saved.
 *
 * Returns: nothing.  The handler does not hand a status back to SAL;
 * init_handler_platform() dumps state and then spins forever.
 *
 */
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
	pal_min_state_area_t *ms;
#ifdef XEN
	int cpu = smp_processor_id();

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
	       ia64_sal_to_os_handoff_state[cpu].proc_state_param);
#endif

#ifndef XEN
	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
	console_loglevel = 15;	/* make sure printks make it to console */

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
		ia64_sal_to_os_handoff_state.proc_state_param);

	/*
	 * Address of minstate area provided by PAL is physical,
	 * uncacheable (bit 63 set). Convert to Linux virtual
	 * address in region 6.
	 */
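	/*
	 * For illustration (hypothetical value): pal_min_state ==
	 * 0x8000000000fe0000 ORed with (6ul << 61) == 0xc000000000000000
	 * gives 0xc000000000fe0000, the same page re-tagged into region 6.
	 */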
	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
#else
	/* Xen virtual address in region 7. */
	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
#endif

	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
}

#ifndef XEN
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */
#endif /* !XEN */

/* Do per-CPU MCA-related initialization.  */

void __devinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;

	if (smp_processor_id() == 0) {
		void *mca_data;
		int cpu;

#ifdef XEN
		unsigned int pageorder;
		pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
#else
		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
					 * NR_CPUS);
#endif
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef XEN
			mca_data = alloc_xenheap_pages(pageorder);
			__per_cpu_mca[cpu] = __pa(mca_data);
			IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
			               "(mca_data[%d]=%lx)\n",
				       __FUNCTION__, cpu, __per_cpu_mca[cpu],
				       cpu, (u64)mca_data);
#else
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
#endif
		}
	}

        /*
         * The MCA info structure was allocated earlier and its
         * physical address saved in __per_cpu_mca[cpu].  Copy that
         * address to ia64_mca_data so we can access it as a per-CPU
         * variable.
         */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
#ifdef XEN
	IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
	               smp_processor_id(), __get_cpu_var(ia64_mca_data));

	/* sal_to_os_handoff for smp support */
	__get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
	              __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
	IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
	               smp_processor_id(),
		       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
#endif

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

        /*
         * Also, stash away a copy of the PAL address and the PTE
         * needed to map it.
         */
        pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

/*
 * ia64_mca_init
 *
 *  Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *  4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *  Note that this initialization is done very early before some kernel
 *  services are available.
 *
 *  Inputs  :   None
 *
 *  Outputs :   None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
#ifdef XEN
	s64 rc;

	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
#else
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for(i = 0 ; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
#endif /* !XEN */

	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size	= 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0, should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
	ia64_mc_info.imi_monarch_init_handler_size	= 0;
	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
	ia64_mc_info.imi_slave_init_handler_size	= 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

#ifndef XEN
	/*
	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);
#endif /* !XEN */

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}

#ifndef XEN
/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 *  Inputs  :   None
 *  Outputs :   Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	/* Setup the CMCI/P vector and handler */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			cpe_poll_enabled = 0;
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpe_vector) {
					desc = irq_descp(irq);
					desc->status |= IRQ_PER_CPU;
					setup_irq(irq, &mca_cpe_irqaction);
				}
			ia64_mca_register_cpev(cpe_vector);
			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
		} else {
			/* If platform doesn't support CPEI, get the timer going. */
			if (cpe_poll_enabled) {
				ia64_mca_cpe_poll(0UL);
				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
			}
		}
	}
#endif

	return 0;
}

device_initcall(ia64_mca_late_init);
#endif /* !XEN */