/*
    ChibiOS/RT - Copyright (C) 2006,2007,2008,2009,2010 Giovanni Di Sirio.

    This file is part of ChibiOS/RT.

    ChibiOS/RT is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    ChibiOS/RT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "ch.h"
#include "test.h"

/**
 * @page test_threads Threads and Scheduler test
 *
 * <h2>Description</h2>
 * This module implements the test sequence for the @ref scheduler,
 * @ref threads and @ref time subsystems.<br>
 * Note that the tests on those subsystems are formally required, but most of
 * their functionality is already exercised implicitly because the test suite
 * itself depends on them; an explicit double check is still worthwhile.
 *
 * <h2>Objective</h2>
 * The objective of the test module is to cover 100% of the subsystems' code.
 *
 * <h2>Preconditions</h2>
 * None.
 *
 * <h2>Test Cases</h2>
 * - @subpage test_threads_001
 * - @subpage test_threads_002
 * - @subpage test_threads_003
 * - @subpage test_threads_004
 * .
 * @file testthd.c
 * @brief Threads and Scheduler test source file
 * @file testthd.h
 * @brief Threads and Scheduler test header file
 */

/**
 * @page test_threads_001 Ready List functionality #1
 *
 * <h2>Description</h2>
 * Five threads, with increasing priority, are enqueued in the ready list
 * and atomically executed.<br>
 * The test expects the threads to perform their operations in priority
 * order, highest priority first, regardless of the enqueuing order.
 */

/*
 * Common thread body, it emits the single character received as the thread
 * parameter so that test_assert_sequence() can verify the execution order.
 */
static msg_t thread(void *p) {

  test_emit_token(*(char *)p);
  return 0;
}

static char *thd1_gettest(void) {

  return "Threads, enqueuing test #1";
}

static void thd1_execute(void) {
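  /* The five threads are created at priorities below the current one, so
     none of them preempts the creator; they stay enqueued in the ready
     list until test_wait_threads() waits on them.*/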

  threads[0] = chThdCreateStatic(wa[0], WA_SIZE, chThdGetPriority()-5, thread, "E");
  threads[1] = chThdCreateStatic(wa[1], WA_SIZE, chThdGetPriority()-4, thread, "D");
  threads[2] = chThdCreateStatic(wa[2], WA_SIZE, chThdGetPriority()-3, thread, "C");
  threads[3] = chThdCreateStatic(wa[3], WA_SIZE, chThdGetPriority()-2, thread, "B");
  threads[4] = chThdCreateStatic(wa[4], WA_SIZE, chThdGetPriority()-1, thread, "A");
  test_wait_threads();
  test_assert_sequence(1, "ABCDE");
}

/*
 * Test case descriptor; the two NULL entries are, presumably, the optional
 * setup and teardown hooks declared by the test engine in test.h.
 */
const struct testcase testthd1 = {
  thd1_gettest,
  NULL,
  NULL,
  thd1_execute
};

/**
 * @page test_threads_002 Ready List functionality #2
 *
 * <h2>Description</h2>
 * Five threads, created in pseudo-random priority order, are enqueued in the
 * ready list and atomically executed.<br>
 * The test expects the threads to perform their operations in priority
 * order, highest priority first, regardless of the creation order.
 */

static char *thd2_gettest(void) {

  return "Threads, enqueuing test #2";
}

static void thd2_execute(void) {

  threads[1] = chThdCreateStatic(wa[1], WA_SIZE, chThdGetPriority()-4, thread, "D");
  threads[0] = chThdCreateStatic(wa[0], WA_SIZE, chThdGetPriority()-5, thread, "E");
  threads[4] = chThdCreateStatic(wa[4], WA_SIZE, chThdGetPriority()-1, thread, "A");
  threads[3] = chThdCreateStatic(wa[3], WA_SIZE, chThdGetPriority()-2, thread, "B");
  threads[2] = chThdCreateStatic(wa[2], WA_SIZE, chThdGetPriority()-3, thread, "C");
  test_wait_threads();
  test_assert_sequence(1, "ABCDE");
}

const struct testcase testthd2 = {
  thd2_gettest,
  NULL,
  NULL,
  thd2_execute
};

/**
 * @page test_threads_003 Threads priority change test
 *
 * <h2>Description</h2>
 * A series of priority changes is performed on the current thread in order
 * to verify that each priority change happens as expected.<br>
 * If the @p CH_USE_MUTEXES option is enabled then the priority changes are
 * also tested under priority inheritance boosted priority state.
 */
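
/*
 * For context, a sketch of how a boosted state (p_prio > p_realprio) arises
 * through real priority inheritance rather than by the direct field
 * manipulation used in thd3_execute() below. This assumes CH_USE_MUTEXES;
 * the mutex and the low priority thread are hypothetical, not part of this
 * test:
 *
 *   static Mutex mtx;              // initialized elsewhere, chMtxInit(&mtx)
 *
 *   static msg_t low_prio_thread(void *p) {
 *
 *     chMtxLock(&mtx);             // the owner is a low priority thread
 *     // While the mutex is held, a higher priority thread blocking on
 *     // chMtxLock(&mtx) transfers its priority: p_prio is raised above
 *     // p_realprio until...
 *     chMtxUnlock();               // ...the mutex is released here.
 *     return 0;
 *   }
 */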

static char *thd3_gettest(void) {

  return "Threads, priority change";
}

static void thd3_execute(void) {
  tprio_t prio, p1;

  prio = chThdGetPriority();
  p1 = chThdSetPriority(prio + 1);
  test_assert(1, p1 == prio,
              "unexpected returned priority level");
  test_assert(2, chThdGetPriority() == prio + 1,
              "unexpected priority level");
  p1 = chThdSetPriority(p1);
  test_assert(3, p1 == prio + 1,
              "unexpected returned priority level");
  test_assert(4, chThdGetPriority() == prio,
              "unexpected priority level");

#if CH_USE_MUTEXES
  /* Simulates a priority boost situation (p_prio > p_realprio).*/
  chSysLock();
  chThdSelf()->p_prio += 2;
  chSysUnlock();
  test_assert(5, chThdGetPriority() == prio + 2,
              "unexpected priority level");

  /* Tries to raise the priority but remains below the boost level.*/
  p1 = chThdSetPriority(prio + 1);
  test_assert(6, p1 == prio,
              "unexpected returned priority level");
  test_assert(7, chThdSelf()->p_prio == prio + 2,
              "unexpected priority level");
  test_assert(8, chThdSelf()->p_realprio == prio + 1,
              "unexpected real priority level");

  /* Tries to raise above the boost level. */
  p1 = chThdSetPriority(prio + 3);
  test_assert(9, p1 == prio + 1,
              "unexpected returned priority level");
  test_assert(10, chThdSelf()->p_prio == prio + 3,
              "unexpected priority level");
  test_assert(11, chThdSelf()->p_realprio == prio + 3,
              "unexpected real priority level");

  /* Restores the original priority levels in order not to affect the
     following tests.*/
  chSysLock();
  chThdSelf()->p_prio = prio;
  chThdSelf()->p_realprio = prio;
  chSysUnlock();
#endif
}

const struct testcase testthd3 = {
  thd3_gettest,
  NULL,
  NULL,
  thd3_execute
};

/**
 * @page test_threads_004 Threads delays test
 *
 * <h2>Description</h2>
 * Delay APIs and the associated macros are tested; the invoking thread is
 * verified to wake up at the exact expected time.
 */

static char *thd4_gettest(void) {

  return "Threads, delays";
}

static void thd4_execute(void) {
  systime_t time;

  test_wait_tick();

  /* Timeouts in microseconds.*/
  time = chTimeNow();
  chThdSleepMicroseconds(100000);
  test_assert_time_window(1, time + US2ST(100000), time + US2ST(100000) + 1);

  /* Timeouts in milliseconds.*/
  time = chTimeNow();
  chThdSleepMilliseconds(100);
  test_assert_time_window(2, time + MS2ST(100), time + MS2ST(100) + 1);

  /* Timeouts in seconds.*/
  time = chTimeNow();
  chThdSleepSeconds(1);
  test_assert_time_window(3, time + S2ST(1), time + S2ST(1) + 1);

  /* Absolute timelines.*/
  time = chTimeNow() + MS2ST(100);
  chThdSleepUntil(time);
  test_assert_time_window(4, time, time + 1);
}
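
/*
 * The one-tick tolerance in the windows above accounts for the sleep being
 * started in between two tick interrupts. For reference, a sketch of the
 * tick conversion macros used (an approximation; the exact definitions and
 * their rounding behavior live in the kernel headers):
 *
 *   #define S2ST(sec)   ((systime_t)((sec) * CH_FREQUENCY))
 *   #define MS2ST(msec) ((systime_t)(((msec) * CH_FREQUENCY + 999UL) / 1000UL))
 *   #define US2ST(usec) ((systime_t)(((usec) * CH_FREQUENCY + 999999UL) / 1000000UL))
 */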

const struct testcase testthd4 = {
  thd4_gettest,
  NULL,
  NULL,
  thd4_execute
};

/*
 * Test sequence for threads patterns.
 */
const struct testcase * const patternthd[] = {
  &testthd1,
  &testthd2,
  &testthd3,
  &testthd4,
  NULL
};
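
/*
 * A sketch of how a NULL-terminated sequence like the one above is
 * presumably consumed by the test engine (the actual loop lives in test.c;
 * print_line() is a hypothetical output helper, and the field names are
 * assumed from the initializers above):
 *
 *   const struct testcase * const *tcp;
 *   for (tcp = patternthd; *tcp != NULL; tcp++) {
 *     print_line((*tcp)->gettest());
 *     if ((*tcp)->setup != NULL)
 *       (*tcp)->setup();
 *     (*tcp)->execute();
 *     if ((*tcp)->teardown != NULL)
 *       (*tcp)->teardown();
 *   }
 */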