/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang
 *	Stephane Eranian
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth
 *	Suresh Siddha
 *	Gordon Jin
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifndef XEN
#include
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef XEN
#include
#include
#include
#include
#include
#endif

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
#ifdef XEN
DEFINE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
#endif
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

#ifdef XEN
extern void early_cmdline_parse(char **);
extern unsigned int ns16550_com1_gsi;
#endif

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
#ifdef XEN
#define	D_CACHE_STRIDE_SHIFT	5	/* Safest. */
unsigned long ia64_d_cache_stride_shift = ~0;
#endif

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;
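/*
 * Sketch (not part of the original file): the merge rule described above,
 * expressed as a predicate.  Two buffers can be merged only if they abut
 * and the shared boundary has none of the merge_mask bits set, i.e. it is
 * aligned to (merge_mask + 1).  With the ~0UL default no boundary can
 * qualify, so merging is effectively disabled.
 *
 *	static int buffers_mergeable (unsigned long end_a, unsigned long start_b)
 *	{
 *		return end_a == start_b &&
 *		       (end_a & ia64_max_iommu_merge_mask) == 0;
 *	}
 */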
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
#ifdef XEN
		{
			/* init_boot_pages requires "ps, pe" */
			printk("Init boot pages: 0x%lx -> 0x%lx.\n",
			       __pa(range_start), __pa(range_end));
			(*func)(__pa(range_start), __pa(range_end), 0);
		}
#else
			call_pernode_memory(__pa(range_start), range_end - range_start, func);
#endif

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;
/*
 * netlink/utils.h		Utility Functions
 *
 *	This library is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU Lesser General Public
 *	License as published by the Free Software Foundation version 2.1
 *	of the License.
 *
 * Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch>
 */

#ifndef NETLINK_UTILS_H_
#define NETLINK_UTILS_H_

#include <netlink/netlink.h>
#include <netlink/list.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @name Probability Constants
 * @{
 */

/**
 * Lower probability limit
 * @ingroup utils
 */
#define NL_PROB_MIN 0x0

/**
 * Upper probability limit
 * @ingroup utils
 */
#define NL_PROB_MAX 0xffffffff

/** @} */
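/*
 * Example (sketch): scale a fraction into the probability range bounded
 * by the constants above.  nl_prob2int() (declared below) performs the
 * same conversion from a probability string.
 *
 *	uint32_t p = (uint32_t) (0.25 * NL_PROB_MAX);	// 25% probability
 */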

/* unit pretty-printing */
extern double	nl_cancel_down_bytes(unsigned long long, char **);
extern double	nl_cancel_down_bits(unsigned long long, char **);
extern double	nl_cancel_down_us(uint32_t, char **);
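/*
 * Example (sketch): pretty-print a quantity by cancelling it down to the
 * largest fitting unit; the unit pointer is set to a unit-name string.
 *
 *	char *unit;
 *	double val = nl_cancel_down_bytes(1048576ULL, &unit);
 *	printf("%.2f %s\n", val, unit);		// e.g. "1.00 MiB"
 */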

/* generic unit translations */
extern long	nl_size2int(const char *);
extern long	nl_prob2int(const char *);

/* time translations */
extern int	nl_get_hz(void);
extern uint32_t	nl_us2ticks(uint32_t);
extern uint32_t	nl_ticks2us(uint32_t);
extern int	nl_str2msec(const char *, uint64_t *);
extern char *	nl_msec2str(uint64_t, char *, size_t);
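/*
 * Example (sketch): convert between human readable time and kernel ticks.
 * The tick rate reported by nl_get_hz() determines the scaling.
 *
 *	char buf[64];
 *	uint32_t ticks = nl_us2ticks(1000000);	// one second worth of ticks
 *	printf("%s\n", nl_msec2str(90061000ULL, buf, sizeof(buf)));
 *						// e.g. "1d 1h 1m 1s"
 */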

/* link layer protocol translations */
extern char *	nl_llproto2str(int, char *, size_t);
extern int	nl_str2llproto(const char *);

/* ethernet protocol translations */
extern char *	nl_ether_proto2str(int, char *, size_t);
extern int	nl_str2ether_proto(const char *);

/* IP protocol translations */
extern char *	nl_ip_proto2str(int, char *, size_t);
extern int	nl_str2ip_proto(const char *);
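/*
 * Example (sketch): translate protocol numbers to names and back; the
 * link layer and ethernet helpers above follow the same pattern.
 *
 *	char buf[32];
 *	printf("%s\n", nl_ip_proto2str(6, buf, sizeof(buf)));	// e.g. "tcp"
 *	int proto = nl_str2ip_proto("udp");			// e.g. 17
 */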

/* Dumping helpers */
extern void	nl_new_line(struct nl_dump_params *);
extern void	nl_dump(struct nl_dump_params *, const char *, ...);
extern void	nl_dump_line(struct nl_dump_params *, const char *, ...);
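/*
 * Example (sketch; dp_type/dp_fd assumed to be the relevant fields of
 * struct nl_dump_params): emit dump output line by line to stdout.
 *
 *	struct nl_dump_params dp = {
 *		.dp_type = NL_DUMP_LINE,
 *		.dp_fd = stdout,
 *	};
 *	nl_dump_line(&dp, "%d objects\n", 42);
 */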

#ifdef __cplusplus
}
#endif

#endif
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable cpu's
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));

#ifdef XEN
	/* If vmx feature is on, do necessary initialization for vmx */
	if (vmx_enabled)
		vmx_init_env();
#endif
}

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
#ifdef XEN
		ia64_d_cache_stride_shift = D_CACHE_STRIDE_SHIFT;
#endif
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
#ifdef XEN
		if (cci.pcci_stride < ia64_d_cache_stride_shift)
			ia64_d_cache_stride_shift = cci.pcci_stride;
#endif
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
#ifdef XEN
	if (ia64_d_cache_stride_shift > ia64_i_cache_stride_shift)
		ia64_d_cache_stride_shift = ia64_i_cache_stride_shift;
#endif
}
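/*
 * Sketch (not the actual flusher, which lives elsewhere): how a
 * "flush_icache_range()"-style routine would consume the stride computed
 * above -- walk the range in stride-sized steps, flushing one cache line
 * per step, then serialize.
 *
 *	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
 *	unsigned long addr;
 *
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		ia64_fc((void *) addr);		// flush cache line
 *	ia64_sync_i();
 *	ia64_srlz_i();
 */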
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
#ifndef XEN
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
#endif
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

#ifdef XEN
	printk(XENLOG_DEBUG "cpu_init: current=%p\n", current);
#endif

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
# define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
#ifdef XEN
	ia64_setreg(_IA64_REG_CR_DCR, IA64_DEFAULT_DCR_BITS);
#else
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
#endif
#ifndef XEN
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
#endif
#ifndef XEN
	if (current->mm)
		BUG();
#endif

#ifdef XEN
	ia64_fph_enable();
	__ia64_init_fpu();
#endif

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif
	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

#ifndef XEN
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}
#endif
	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

	platform_cpu_init();
#ifndef XEN
	pm_idle = default_idle;
#endif

#ifdef XEN
	/* surrender usage of kernel registers to domain, use percpu area instead */
	__get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
	__get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
	__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
	__get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
	__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
	__get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
#endif
}

#ifndef XEN
void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}
#endif