/******************************************************************************
 * page_alloc.c
 * 
 * Simple buddy heap allocator for Xen.
 * 
 * Copyright (c) 2002-2004 K A Fraser
 * Copyright (c) 2006 IBM Ryan Harper
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/spinlock.h>
#include <xen/mm.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/shadow.h>
#include <xen/domain_page.h>
#include <xen/keyhandler.h>
#include <xen/perfc.h>
#include <xen/numa.h>
#include <xen/nodemask.h>
#include <asm/page.h>

/*
 * Comma-separated list of hexadecimal page numbers containing bad bytes.
 * e.g. 'badpage=0x3f45,0x8a321'.
 */
static char opt_badpage[100] = "";
string_param("badpage", opt_badpage);

/*
 * Amount of memory to reserve in a low-memory (<4GB) pool for specific
 * allocation requests. Ordinary requests will not fall back to the
 * lowmem emergency pool.
 */
static unsigned long lowmem_emergency_pool_pages;
static void parse_lowmem_emergency_pool(char *s)
{
    unsigned long long bytes;
    bytes = parse_size_and_unit(s);
    lowmem_emergency_pool_pages = bytes >> PAGE_SHIFT;
}
custom_param("lowmem_emergency_pool", parse_lowmem_emergency_pool);

#define round_pgdown(_p)  ((_p)&PAGE_MASK)
#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)

static DEFINE_SPINLOCK(page_scrub_lock);
LIST_HEAD(page_scrub_list);
static unsigned long scrub_pages;

/*********************
 * ALLOCATION BITMAP
 *  One bit per page of memory. Bit set => page is allocated.
 */

static unsigned long *alloc_bitmap;
#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

#define allocated_in_map(_pn)                       \
    ( !! (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] &   \
          (1UL<<((_pn)&(PAGES_PER_MAPWORD-1)))) )

/*
 * Hint regarding bitwise arithmetic in map_{alloc,free}:
 *  -(1<<n)  sets all bits >= n.
 *  (1<<n)-1 sets all bits <  n.
 */
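/*
 * A minimal, standalone sketch (not part of page_alloc.c) illustrating the
 * identities quoted in the hint above: -(1UL<<n) sets every bit >= n, and
 * (1UL<<n)-1 sets every bit < n.  ANDing the two yields a mask covering bit
 * positions [lo, hi), which is how a map_alloc()-style helper can mark a run
 * of pages inside a single bitmap word.  The helper name mark_pages_in_word()
 * is hypothetical, chosen only for this illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PAGES_PER_WORD (sizeof(unsigned long) * 8)

/* Build a mask with bits [lo, hi) set, where lo < hi <= PAGES_PER_WORD. */
static unsigned long mark_pages_in_word(unsigned int lo, unsigned int hi)
{
    unsigned long ge_lo = -(1UL << lo);                 /* all bits >= lo */
    unsigned long lt_hi = (hi == PAGES_PER_WORD)        /* all bits <  hi */
                          ? ~0UL : (1UL << hi) - 1;
    return ge_lo & lt_hi;
}

int main(void)
{
    /* Pages 4..11 of a map word: eight consecutive bits starting at bit 4. */
    unsigned long mask = mark_pages_in_word(4, 12);
    assert(mask == 0xff0UL);
    printf("mask for pages [4,12): %#lx\n", mask);
    return 0;
}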
#
# Copyright (C) 2010-2012 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk

ARCH:=arm
BOARD:=cns3xxx
BOARDNAME:=Cavium Networks Econa CNS3xxx
FEATURES:=squashfs fpu gpio pcie usb usbgadget
CPU_TYPE:=mpcore
CPU_SUBTYPE:=vfp
MAINTAINER:=Imre Kaloz <kaloz@openwrt.org>

LINUX_VERSION:=3.8.13

include $(INCLUDE_DIR)/target.mk

define Target/Description
	Build images for Cavium Networks Econa CNS3xxx based boards,
	e.g. the Gateworks Laguna family
endef

KERNELNAME:="zImage"

DEFAULT_PACKAGES += kmod-ath9k kmod-usb2 wpad-mini

$(eval $(call BuildTarget))
    PFN_ORDER(pg) = order;
    list_add_tail(&pg->list, &heap[zone][node][order]);

    spin_unlock(&heap_lock);
}


/*
 * Scrub all unallocated pages in all heap zones. This function is more
 * convoluted than appears necessary because we do not want to continuously
 * hold the lock or disable interrupts while scrubbing very large memory areas.
 */
void scrub_heap_pages(void)
{
    void *p;
    unsigned long pfn;

    printk("Scrubbing Free RAM: ");

    for ( pfn = 0; pfn < max_page; pfn++ )
    {
        /* Every 100MB, print a progress dot. */
        if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
            printk(".");

        process_pending_timers();

        /* Quick lock-free check. */
        if ( allocated_in_map(pfn) )
            continue;

        spin_lock_irq(&heap_lock);

        /* Re-check page status with lock held. */
        if ( !allocated_in_map(pfn) )
        {
            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
            {
                p = page_to_virt(mfn_to_page(pfn));
                memguard_unguard_range(p, PAGE_SIZE);
                clear_page(p);
                memguard_guard_range(p, PAGE_SIZE);
            }
            else
            {
                p = map_domain_page(pfn);
                clear_page(p);
                unmap_domain_page(p);
            }
        }

        spin_unlock_irq(&heap_lock);
    }

    printk("done.\n");
}


/*************************
 * XEN-HEAP SUB-ALLOCATOR
 */

void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long flags;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);
    if ( pe <= ps )
        return;

    memguard_guard_range(maddr_to_virt(ps), pe - ps);

    /*
     * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
     * prevent merging of power-of-two blocks across the zone boundary.
     */
    if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
        pe -= PAGE_SIZE;

    local_irq_save(flags);
    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    local_irq_restore(flags);
}


void *alloc_xenheap_pages(unsigned int order)
{
    unsigned long flags;
    struct page_info *pg;
    int i;

    local_irq_save(flags);
    pg = alloc_heap_pages(MEMZONE_XEN, smp_processor_id(), order);
    local_irq_restore(flags);

    if ( unlikely(pg == NULL) )
        goto no_memory;

    memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));

    for ( i = 0; i < (1 << order); i++ )
    {
        pg[i].count_info        = 0;
        pg[i].u.inuse._domain   = 0;
        pg[i].u.inuse.type_info = 0;
    }

    return page_to_virt(pg);

 no_memory:
    printk("Cannot handle page request order %d!\n", order);
    return NULL;
}


void free_xenheap_pages(void *v, unsigned int order)
{
    unsigned long flags;

    if ( v == NULL )
        return;

    memguard_guard_range(v, 1 << (order + PAGE_SHIFT));

    local_irq_save(flags);
    free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
    local_irq_restore(flags);
}


/*************************
 * DOMAIN-HEAP SUB-ALLOCATOR
 */

void init_domheap_pages(paddr_t ps, paddr_t pe)
{
    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;

    ASSERT(!in_irq());

    s_tot = round_pgup(ps) >> PAGE_SHIFT;
    e_tot = round_pgdown(pe) >> PAGE_SHIFT;

    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_dma < e_dma )
        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);

    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
    if ( s_nrm < e_nrm )
        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
}
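/*
 * A minimal, standalone sketch (all names hypothetical, not Xen APIs) of the
 * locking pattern scrub_heap_pages() above relies on: do a cheap lock-free
 * check per element, and only for the rare elements that appear to need work
 * take the lock and re-check before acting.  The lock is never held across
 * the whole scan, so other CPUs can keep allocating while the sweep runs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define NENTRIES 1024

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool entry_free[NENTRIES];

static void scrub_entry(size_t i)
{
    (void)i;    /* stand-in for clear_page() on the entry's backing page */
}

static void sweep_table(void)
{
    for ( size_t i = 0; i < NENTRIES; i++ )
    {
        /* Quick lock-free check: skip entries that are clearly busy. */
        if ( !entry_free[i] )
            continue;

        pthread_mutex_lock(&table_lock);
        /* Re-check with the lock held: the entry may have been allocated
         * between the unlocked test and acquiring the lock. */
        if ( entry_free[i] )
            scrub_entry(i);
        pthread_mutex_unlock(&table_lock);
    }
}

int main(void)
{
    entry_free[3] = true;   /* pretend one entry is free and needs scrubbing */
    sweep_table();
    return 0;
}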
int assign_pages(
    struct domain *d,
    struct page_info *pg,
    unsigned int order,
    unsigned int memflags)
{
    unsigned long i;

    spin_lock(&d->page_alloc_lock);

    if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) )
    {
        DPRINTK("Cannot assign page to domain%d -- dying.\n", d->domain_id);
        goto fail;
    }

    if ( !(memflags & MEMF_no_refcount) )
    {
        if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
        {
            DPRINTK("Over-allocation for domain %u: %u > %u\n",
                    d->domain_id, d->tot_pages + (1 << order), d->max_pages);
            goto fail;
        }

        if ( unlikely(d->tot_pages == 0) )
            get_knownalive_domain(d);

        d->tot_pages += 1 << order;
    }

    for ( i = 0; i < (1 << order); i++ )
    {
        ASSERT(page_get_owner(&pg[i]) == NULL);
        ASSERT((pg[i].count_info & ~(PGC_allocated | 1)) == 0);
        page_set_owner(&pg[i], d);
        wmb(); /* Domain pointer must be visible before updating refcnt. */
        pg[i].count_info = PGC_allocated | 1;
        list_add_tail(&pg[i].list, &d->page_list);
    }

    spin_unlock(&d->page_alloc_lock);
    return 0;

 fail:
    spin_unlock(&d->page_alloc_lock);
    return -1;
}


struct page_info *__alloc_domheap_pages(
    struct domain *d, unsigned int cpu, unsigned int order,
    unsigned int memflags)
{
    struct page_info *pg = NULL;
    cpumask_t mask;
    unsigned long i;

    ASSERT(!in_irq());

    if ( !(memflags & MEMF_dma) )
    {
        pg = alloc_heap_pages(MEMZONE_DOM, cpu, order);
        /* Failure? Then check if we can fall back to the DMA pool. */
        if ( unlikely(pg == NULL) &&
             ((order > MAX_ORDER) ||
              (avail_heap_pages(MEMZONE_DMADOM,-1) <
               (lowmem_emergency_pool_pages + (1UL << order)))) )
            return NULL;
    }

    if ( pg == NULL )
        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, cpu, order)) == NULL )
            return NULL;

    mask = pg->u.free.cpumask;
    tlbflush_filter(mask, pg->tlbflush_timestamp);

    pg->count_info        = 0;
    pg->u.inuse._domain   = 0;
    pg->u.inuse.type_info = 0;

    for ( i = 1; i < (1 << order); i++ )
    {
        /* Add in any extra CPUs that need flushing because of this page. */
        cpumask_t extra_cpus_mask;
        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
        tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
        cpus_or(mask, mask, extra_cpus_mask);

        pg[i].count_info        = 0;
        pg[i].u.inuse._domain   = 0;
        pg[i].u.inuse.type_info = 0;
        page_set_owner(&pg[i], NULL);
    }

    if ( unlikely(!cpus_empty(mask)) )
    {
        perfc_incrc(need_flush_tlb_flush);
        flush_tlb_mask(mask);
    }

    if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
    {
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        return NULL;
    }

    return pg;
}


inline struct page_info *alloc_domheap_pages(
    struct domain *d, unsigned int order, unsigned int flags)
{
    return __alloc_domheap_pages(d, smp_processor_id(), order, flags);
}
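/*
 * A minimal, standalone sketch (hypothetical helper, not a Xen API) of the
 * fallback rule __alloc_domheap_pages() applies above: a non-DMA request may
 * spill into the DMA zone only if the allocation would still leave the
 * reserved lowmem emergency pool untouched.  (The real code additionally
 * refuses the fallback when order > MAX_ORDER.)
 */
#include <assert.h>
#include <stdbool.h>

static bool may_fall_back_to_dma(unsigned long dma_free_pages,
                                 unsigned long reserved_pool_pages,
                                 unsigned int order)
{
    unsigned long want = 1UL << order;

    /* Refuse if the DMA zone would dip into the emergency reservation. */
    return dma_free_pages >= reserved_pool_pages + want;
}

int main(void)
{
    /* Want 2^4 = 16 pages; only 100 free with 90 reserved -> refuse. */
    assert(!may_fall_back_to_dma(100, 90, 4));
    /* 200 free: 184 pages would remain, above the 90-page reservation. */
    assert(may_fall_back_to_dma(200, 90, 4));
    return 0;
}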
void free_domheap_pages(struct page_info *pg, unsigned int order)
{
    int            i, drop_dom_ref;
    struct domain *d = page_get_owner(pg);

    ASSERT(!in_irq());

    if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
            list_del(&pg[i].list);

        d->xenheap_pages -= 1 << order;
        drop_dom_ref = (d->xenheap_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);
    }
    else if ( likely(d != NULL) )
    {
        /* NB. May recursively lock from relinquish_memory(). */
        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; i < (1 << order); i++ )
        {
            shadow_drop_references(d, &pg[i]);
            ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
            pg[i].tlbflush_timestamp = tlbflush_current_time();
            pg[i].u.free.cpumask     = d->domain_dirty_cpumask;
            list_del(&pg[i].list);
        }

        d->tot_pages -= 1 << order;
        drop_dom_ref = (d->tot_pages == 0);

        spin_unlock_recursive(&d->page_alloc_lock);

        if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
        {
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        }
        else
        {
            /*
             * Normally we expect a domain to clear pages before freeing them,
             * if it cares about the secrecy of their contents. However, after
             * a domain has died we assume responsibility for erasure.
             */
            for ( i = 0; i < (1 << order); i++ )
            {
                spin_lock(&page_scrub_lock);
                list_add(&pg[i].list, &page_scrub_list);
                scrub_pages++;
                spin_unlock(&page_scrub_lock);
            }
        }
    }
    else
    {
        /* Freeing anonymous domain-heap pages. */
        for ( i = 0; i < (1 << order); i++ )
            cpus_clear(pg[i].u.free.cpumask);
        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
        drop_dom_ref = 0;
    }

    if ( drop_dom_ref )
        put_domain(d);
}


unsigned long avail_heap_pages(int zone, int node)
{
    int i, j, num_nodes = num_online_nodes();
    unsigned long free_pages = 0;

    for ( i = 0; i < NR_ZONES; i++ )
        if ( (zone == -1) || (zone == i) )
            for ( j = 0; j < num_nodes; j++ )
                if ( (node == -1) || (node == j) )
                    free_pages += avail[i][j];

    return free_pages;
}

unsigned long avail_domheap_pages(void)
{
    unsigned long avail_nrm, avail_dma;

    avail_nrm = avail_heap_pages(MEMZONE_DOM, -1);

    avail_dma = avail_heap_pages(MEMZONE_DMADOM, -1);
    if ( avail_dma > lowmem_emergency_pool_pages )
        avail_dma -= lowmem_emergency_pool_pages;
    else
        avail_dma = 0;

    return avail_nrm + avail_dma;
}

unsigned long avail_nodeheap_pages(int node)
{
    return avail_heap_pages(-1, node);
}

static void pagealloc_keyhandler(unsigned char key)
{
    printk("Physical memory information:\n");
    printk(" Xen heap: %lukB free\n"
           " DMA heap: %lukB free\n"
           " Dom heap: %lukB free\n",
           avail_heap_pages(MEMZONE_XEN, -1) << (PAGE_SHIFT-10),
           avail_heap_pages(MEMZONE_DMADOM, -1) << (PAGE_SHIFT-10),
           avail_heap_pages(MEMZONE_DOM, -1) << (PAGE_SHIFT-10));
}

static __init int pagealloc_keyhandler_init(void)
{
    register_keyhandler('m', pagealloc_keyhandler, "memory info");
    return 0;
}
__initcall(pagealloc_keyhandler_init);


/*************************
 * PAGE SCRUBBING
 */

static void page_scrub_softirq(void)
{
    struct list_head *ent;
    struct page_info *pg;
    void             *p;
    int               i;
    s_time_t          start = NOW();

    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
    do {
        spin_lock(&page_scrub_lock);

        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
        {
            spin_unlock(&page_scrub_lock);
            return;
        }

        /* Peel up to 16 pages from the list. */
        for ( i = 0; i < 16; i++ )
        {
            if ( ent->next == &page_scrub_list )
                break;
            ent = ent->next;
        }

        /* Remove peeled pages from the list. */
        ent->next->prev = &page_scrub_list;
        page_scrub_list.next = ent->next;
        scrub_pages -= (i+1);

        spin_unlock(&page_scrub_lock);

        /* Working backwards, scrub each page in turn. */
        while ( ent != &page_scrub_list )
        {
            pg = list_entry(ent, struct page_info, list);
            ent = ent->prev;
            p = map_domain_page(page_to_mfn(pg));
            clear_page(p);
            unmap_domain_page(p);
            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
        }
    } while ( (NOW() - start) < MILLISECS(1) );
}

unsigned long avail_scrub_pages(void)
{
    return scrub_pages;
}

static unsigned long count_bucket(struct list_head* l, int order)
{
    unsigned long total_pages = 0;
    int pages = 1 << order;
    struct page_info *pg;

    list_for_each_entry(pg, l, list)
        total_pages += pages;

    return total_pages;
}

static void dump_heap(unsigned char key)
{
    s_time_t      now = NOW();
    int           i, j, k;
    unsigned long total;

    printk("'%c' pressed -> dumping heap info (now-0x%X:%08X)\n", key,
           (u32)(now>>32), (u32)now);

    for ( i = 0; i < NR_ZONES; i++ )
        for ( j = 0; j < MAX_NUMNODES; j++ )
            for ( k = 0; k <= MAX_ORDER; k++ )
                if ( !list_empty(&heap[i][j][k]) )
                {
                    total = count_bucket(&heap[i][j][k], k);
                    printk("heap[%d][%d][%d]-> %lu pages\n", i, j, k, total);
                }
}

static __init int register_heap_trigger(void)
{
    register_keyhandler('H', dump_heap, "dump heap info");
    return 0;
}
__initcall(register_heap_trigger);


static __init int page_scrub_init(void)
{
    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
    return 0;
}
__initcall(page_scrub_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
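/*
 * A minimal, standalone sketch (all names hypothetical) of the batching
 * pattern used by page_scrub_softirq() above: hold the list lock only long
 * enough to detach a small batch (16 entries), process the batch with the
 * lock dropped, and stop once roughly 1ms of work has been done so the
 * background scrubber never monopolises the CPU or the lock.
 */
#include <pthread.h>
#include <stdint.h>
#include <time.h>

struct work_item {
    struct work_item *next;
};

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *work_list;     /* singly linked list of pending work */

static void process_item(struct work_item *w)
{
    (void)w;    /* stand-in for clear_page() + freeing the page */
}

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Drain the list in batches of 16, bounded to ~1ms per invocation. */
static void scrub_worker(void)
{
    uint64_t start = now_ns();

    do {
        struct work_item *batch = NULL;
        int i;

        /* Detach up to 16 items while holding the lock. */
        pthread_mutex_lock(&work_lock);
        for ( i = 0; (i < 16) && (work_list != NULL); i++ )
        {
            struct work_item *w = work_list;
            work_list = w->next;
            w->next = batch;
            batch = w;
        }
        pthread_mutex_unlock(&work_lock);

        if ( batch == NULL )
            return;     /* nothing left to do */

        /* Process the detached batch without holding the lock. */
        while ( batch != NULL )
        {
            struct work_item *w = batch;
            batch = w->next;
            process_item(w);
        }
    } while ( (now_ns() - start) < 1000000ull );    /* ~1ms budget */
}

int main(void)
{
    static struct work_item items[64];

    for ( int i = 0; i < 64; i++ )
    {
        items[i].next = work_list;
        work_list = &items[i];
    }
    scrub_worker();
    return 0;
}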