author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-10-25 12:25:54 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-10-25 12:25:54 +0100
commit     582724b0ea12d27f3a18c8c3c15cccf7d655dae6 (patch)
tree       7ab46ccde55050f9725b8446edfe48d2ae5ea316 /xen/arch/x86/srat.c
parent     67ce9409e141bb4949f2f037e937ef3fc18a8608 (diff)
download   xen-582724b0ea12d27f3a18c8c3c15cccf7d655dae6.tar.gz
           xen-582724b0ea12d27f3a18c8c3c15cccf7d655dae6.tar.bz2
           xen-582724b0ea12d27f3a18c8c3c15cccf7d655dae6.zip
[XEN] Add basic NUMA/SRAT support to Xen from Linux 2.6.16.29.
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Diffstat (limited to 'xen/arch/x86/srat.c')
-rw-r--r--  xen/arch/x86/srat.c  |  325
1 files changed, 325 insertions, 0 deletions
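
Before the diff itself, a brief usage note. The file below adds the SRAT/SLIT parsing callbacks (acpi_numa_processor_affinity_init(), acpi_numa_memory_affinity_init(), acpi_numa_slit_init()), acpi_scan_nodes() to turn the parsed proximity domains into Xen nodes, and the pxm_to_node()/__node_distance() query helpers. The sketch that follows is illustrative only and not part of the patch: report_pxm_locality() is a hypothetical helper showing how boot code that has already run acpi_scan_nodes() successfully might consume the new interface.

/* Illustrative sketch, not part of the patch: map a firmware proximity
 * domain to its Xen node and report its distance to node 0, using only
 * pxm_to_node() and __node_distance() as defined in the file below. */
static void __init report_pxm_locality(int pxm)
{
	int node = pxm_to_node(pxm);

	if (node < 0) {
		printk(KERN_INFO "SRAT: PXM %d is not mapped to any node\n", pxm);
		return;
	}
	printk(KERN_INFO "SRAT: PXM %d -> node %d, distance to node 0 is %d\n",
	       pxm, node, __node_distance(node, 0));
}
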
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
new file mode 100644
index 0000000000..dfa85b2539
--- /dev/null
+++ b/xen/arch/x86/srat.c
@@ -0,0 +1,325 @@
+/*
+ * ACPI 3.0 based NUMA setup
+ * Copyright 2004 Andi Kleen, SuSE Labs.
+ *
+ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
+ *
+ * Called from acpi_numa_init while reading the SRAT and SLIT tables.
+ * Assumes all memory regions belonging to a single proximity domain
+ * are in one chunk. Holes between them will be included in the node.
+ *
+ * Adapted for Xen: Ryan Harper <ryanh@us.ibm.com>
+ */
+
+#if 0
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/proto.h>
+#include <xen/bitmap.h>
+#include <xen/numa.h>
+#include <xen/topology.h>
+#include <asm/e820.h>
+#endif
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/inttypes.h>
+#include <xen/nodemask.h>
+#include <xen/acpi.h>
+
+#include <asm/numa.h>
+#include <asm/page.h>
+
+static struct acpi_table_slit *acpi_slit;
+
+static nodemask_t nodes_parsed __initdata;
+static nodemask_t nodes_found __initdata;
+static struct node nodes[MAX_NUMNODES] __initdata;
+static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+
+/* Nodes that are too small confuse the VM badly. Usually they
+ result from BIOS bugs. */
+#define NODE_MIN_SIZE (4*1024*1024)
+
+static int node_to_pxm(int n);
+
+int pxm_to_node(int pxm)
+{
+ if ((unsigned)pxm >= 256)
+ return -1;
+ /* Extend 0xff to (int)-1 */
+ return (signed char)pxm2node[pxm];
+}
+
+static __init int setup_node(int pxm)
+{
+ unsigned node = pxm2node[pxm];
+ if (node == 0xff) {
+ if (nodes_weight(nodes_found) >= MAX_NUMNODES)
+ return -1;
+ node = first_unset_node(nodes_found);
+ node_set(node, nodes_found);
+ pxm2node[pxm] = node;
+ }
+ return pxm2node[pxm];
+}
+
+static __init int conflicting_nodes(u64 start, u64 end)
+{
+ int i;
+ for_each_node_mask(i, nodes_parsed) {
+ struct node *nd = &nodes[i];
+ if (nd->start == nd->end)
+ continue;
+ if (nd->end > start && nd->start < end)
+ return i;
+ if (nd->end == end && nd->start == start)
+ return i;
+ }
+ return -1;
+}
+
+static __init void cutoff_node(int i, u64 start, u64 end)
+{
+ struct node *nd = &nodes[i];
+ if (nd->start < start) {
+ nd->start = start;
+ if (nd->end < nd->start)
+ nd->start = nd->end;
+ }
+ if (nd->end > end) {
+ nd->end = end;
+ if (nd->start > nd->end)
+ nd->start = nd->end;
+ }
+}
+
+static __init void bad_srat(void)
+{
+ int i;
+ printk(KERN_ERR "SRAT: SRAT not used.\n");
+ acpi_numa = -1;
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ apicid_to_node[i] = NUMA_NO_NODE;
+}
+
+static __init inline int srat_disabled(void)
+{
+ return numa_off || acpi_numa < 0;
+}
+
+/*
+ * Many BIOSes fill in 10 (= no distance) everywhere. This messes up
+ * the NUMA heuristics, which want the local node to have a smaller
+ * distance than the others.
+ * Do some quick checks here and only use the SLIT if it passes.
+ */
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+ int i, j;
+ int d = slit->localities;
+ for (i = 0; i < d; i++) {
+ for (j = 0; j < d; j++) {
+ u8 val = slit->entry[d*i + j];
+ if (i == j) {
+ if (val != 10)
+ return 0;
+ } else if (val <= 10)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Callback for SLIT parsing */
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+{
+ if (!slit_valid(slit)) {
+ printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ return;
+ }
+ acpi_slit = slit;
+}
+
+/* Callback for Proximity Domain -> LAPIC mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+{
+ int pxm, node;
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ bad_srat();
+ return;
+ }
+ if (pa->flags.enabled == 0)
+ return;
+ pxm = pa->proximity_domain;
+ node = setup_node(pxm);
+ if (node < 0) {
+ printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+ apicid_to_node[pa->apic_id] = node;
+ acpi_numa = 1;
+ printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+ pxm, pa->apic_id, node);
+}
+
+/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
+void __init
+acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+{
+ struct node *nd;
+ u64 start, end;
+ int node, pxm;
+ int i;
+
+ if (srat_disabled())
+ return;
+ if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+ bad_srat();
+ return;
+ }
+ if (ma->flags.enabled == 0)
+ return;
+ start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
+ end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+ pxm = ma->proximity_domain;
+ node = setup_node(pxm);
+ if (node < 0) {
+ printk(KERN_ERR "SRAT: Too many proximity domains.\n");
+ bad_srat();
+ return;
+ }
+ /* It is fine to add this area to the nodes data; it will be used later. */
+ if (ma->flags.hot_pluggable == 1)
+ printk(KERN_INFO "SRAT: hot plug zone found %"PRIx64" - %"PRIx64" \n",
+ start, end);
+ i = conflicting_nodes(start, end);
+ if (i == node) {
+ printk(KERN_WARNING
+ "SRAT: Warning: PXM %d (%"PRIx64"-%"PRIx64") overlaps with itself (%"
+ PRIx64"-%"PRIx64")\n", pxm, start, end, nodes[i].start, nodes[i].end);
+ } else if (i >= 0) {
+ printk(KERN_ERR
+ "SRAT: PXM %d (%"PRIx64"-%"PRIx64") overlaps with PXM %d (%"
+ PRIx64"-%"PRIx64")\n", pxm, start, end, node_to_pxm(i),
+ nodes[i].start, nodes[i].end);
+ bad_srat();
+ return;
+ }
+ nd = &nodes[node];
+ if (!node_test_and_set(node, nodes_parsed)) {
+ nd->start = start;
+ nd->end = end;
+ } else {
+ if (start < nd->start)
+ nd->start = start;
+ if (nd->end < end)
+ nd->end = end;
+ }
+ printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"\n", node, pxm,
+ nd->start, nd->end);
+}
+
+/* Sanity check to catch more bad SRATs (they are amazingly common).
+ Make sure the PXMs cover all memory. */
+static int nodes_cover_memory(void)
+{
+ int i;
+ u64 pxmram, e820ram;
+
+ pxmram = 0;
+ for_each_node_mask(i, nodes_parsed) {
+ u64 s = nodes[i].start >> PAGE_SHIFT;
+ u64 e = nodes[i].end >> PAGE_SHIFT;
+ pxmram += e - s;
+ }
+
+ e820ram = max_page;
+ /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
+ if ((long)(e820ram - pxmram) >= 1*1024*1024) {
+ printk(KERN_ERR "SRAT: PXMs only cover %"PRIu64"MB of your %"
+ PRIu64"MB e820 RAM. Not used.\n",
+ (pxmram << PAGE_SHIFT) >> 20,
+ (e820ram << PAGE_SHIFT) >> 20);
+ return 0;
+ }
+ return 1;
+}
+
+static void unparse_node(int node)
+{
+ int i;
+ node_clear(node, nodes_parsed);
+ for (i = 0; i < MAX_LOCAL_APIC; i++) {
+ if (apicid_to_node[i] == node)
+ apicid_to_node[i] = NUMA_NO_NODE;
+ }
+}
+
+void __init acpi_numa_arch_fixup(void) {}
+
+/* Use the information discovered above to actually set up the nodes. */
+int __init acpi_scan_nodes(u64 start, u64 end)
+{
+ int i;
+
+ /* First clean up the node list */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ cutoff_node(i, start, end);
+ if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+ unparse_node(i);
+ }
+
+ if (acpi_numa <= 0)
+ return -1;
+
+ if (!nodes_cover_memory()) {
+ bad_srat();
+ return -1;
+ }
+
+ memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
+ if (memnode_shift < 0) {
+ printk(KERN_ERR
+ "SRAT: No NUMA node hash function found. Contact maintainer\n");
+ bad_srat();
+ return -1;
+ }
+
+ /* Finally register nodes */
+ for_each_node_mask(i, nodes_parsed)
+ setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_to_node[i] == NUMA_NO_NODE)
+ continue;
+ if (!node_isset(cpu_to_node[i], nodes_parsed))
+ numa_set_node(i, NUMA_NO_NODE);
+ }
+ numa_init_array();
+ return 0;
+}
+
+static int node_to_pxm(int n)
+{
+ int i;
+ if (pxm2node[n] == n)
+ return n;
+ for (i = 0; i < 256; i++)
+ if (pxm2node[i] == n)
+ return i;
+ return 0;
+}
+
+int __node_distance(int a, int b)
+{
+ int index;
+
+ if (!acpi_slit)
+ return a == b ? 10 : 20;
+ index = acpi_slit->localities * node_to_pxm(a);
+ return acpi_slit->entry[index + node_to_pxm(b)];
+}
+
+EXPORT_SYMBOL(__node_distance);
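
As a closing note (not part of the patch): once acpi_scan_nodes() has accepted the SRAT, the SLIT-derived distances are available through __node_distance(). Below is a minimal sketch, assuming it were added to this file so it can walk the file-local nodes_parsed mask; without a valid SLIT the function simply reports 10 for local and 20 for remote pairs.

/* Minimal sketch, not part of the patch: dump the node distance matrix
 * for every parsed node pair.  Relies only on __node_distance() and the
 * for_each_node_mask() iterator already used above. */
static void __init dump_node_distances(void)
{
	int a, b;

	for_each_node_mask(a, nodes_parsed)
		for_each_node_mask(b, nodes_parsed)
			printk(KERN_INFO "SRAT: node %d -> node %d distance %d\n",
			       a, b, __node_distance(a, b));
}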