/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard
 *          Jimi Xenidis
 *          Ryan Harper
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",          \
           current->domain->domain_id, __LINE__, ## _a)
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* Frame table and its size in pages. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;

/* Machine-to-phys mapping used by all domains. */
unsigned long *machine_phys_mapping;

void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

/* Array of PFNs, indexed by MFN. */
void __init init_machine_to_phys_table(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(unsigned long));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for machine phys mapping table\n");

    machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly)
{
    if ( page_get_owner(page) == d )
        return;

    /* This causes us to leak pages in the domain and results in
     * zombie domains; I think we are missing a piece. Until we find
     * it we disable the following code. */
    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);

    spin_lock(&d->page_alloc_lock);

    /* The incremented type count pins as writable or read-only. */
    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    page->u.inuse.type_info |= PGT_validated | 1;

    page_set_owner(page, d);
    wmb(); /* install valid domain ptr before updating refcnt. */
    ASSERT(page->count_info == 0);

    /* Only add to the allocation list if the domain isn't dying. */
    if ( !d->is_dying )
    {
        page->count_info |= PGC_allocated | 1;
        if ( unlikely(d->xenheap_pages++ == 0) )
            get_knownalive_domain(d);
        list_add_tail(&page->list, &d->xenpage_list);
    }

    spin_unlock(&d->page_alloc_lock);
}

void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly)
{
    unimplemented();
}

static ulong foreign_to_mfn(struct domain *d, ulong pfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);

    return d->arch.foreign_mfns[pfn];
}

static int set_foreign(struct domain *d, ulong pfn, ulong mfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);
    d->arch.foreign_mfns[pfn] = mfn;

    return 0;
}

static int create_grant_va_mapping(
    unsigned long va, unsigned long frame, struct vcpu *v)
{
    if (v->domain->domain_id != 0) {
        printk("only Dom0 can map a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(v->domain, va >> PAGE_SHIFT, frame);
    return GNTST_okay;
}

static int destroy_grant_va_mapping(
    unsigned long addr, unsigned long frame, struct domain *d)
{
    if (d->domain_id != 0) {
        printk("only Dom0 can unmap a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(d, addr >> PAGE_SHIFT, ~0UL);
    return GNTST_okay;
}

int create_grant_host_mapping(
    unsigned long addr, unsigned long frame,
    unsigned int flags, unsigned int cache_flags)
{
    if (flags & GNTMAP_application_map) {
        printk("%s: GNTMAP_application_map not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (cache_flags) {
        printk("%s: cache_flags not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    return create_grant_va_mapping(addr, frame, current);
}

int replace_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned long new_addr,
    unsigned int flags)
{
    if (new_addr) {
        printk("%s: new_addr not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    /* We may have to force the remove here. */
    return destroy_grant_va_mapping(addr, frame, current->domain);
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}

void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) || page_get_owner(page)->is_dying);

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}

int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This
                 * may be unnecessary (e.g., page was GDT/LDT) but those
                 * circumstances should be very rare.
                 */
                cpumask_t mask = page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incr(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
/*
 * netlink/cache.h		Caching Module
 *
 *	This library is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU Lesser General Public
 *	License as published by the Free Software Foundation version 2.1
 *	of the License.
 *
 * Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch>
 */

#ifndef NETLINK_CACHE_H_
#define NETLINK_CACHE_H_

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/utils.h>
#include <netlink/object.h>
#include <netlink/cache-api.h>

#ifdef __cplusplus
extern "C" {
#endif

struct nl_cache;

typedef void (*change_func_t)(struct nl_cache *, struct nl_object *, int);
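/* Change callback: receives the cache, the object affected, and an
 * integer describing the type of change. */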

/* Access Functions */
extern int			nl_cache_nitems(struct nl_cache *);
extern int			nl_cache_nitems_filter(struct nl_cache *,
						       struct nl_object *);
extern struct nl_cache_ops *	nl_cache_get_ops(struct nl_cache *);
extern struct nl_object *	nl_cache_get_first(struct nl_cache *);
extern struct nl_object *	nl_cache_get_last(struct nl_cache *);
extern struct nl_object *	nl_cache_get_next(struct nl_object *);
extern struct nl_object *	nl_cache_get_prev(struct nl_object *);

extern struct nl_cache *	nl_cache_alloc(struct nl_cache_ops *);
extern int			nl_cache_alloc_and_fill(struct nl_cache_ops *,
							struct nl_sock *,
							struct nl_cache **);
extern int			nl_cache_alloc_name(const char *,
						    struct nl_cache **);
extern struct nl_cache *	nl_cache_subset(struct nl_cache *,
						struct nl_object *);
extern void			nl_cache_clear(struct nl_cache *);
extern void			nl_cache_free(struct nl_cache *);
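
/*
 * A minimal usage sketch (assumes a connected struct nl_sock *sock and a
 * registered "route/link" cache type; error handling elided):
 *
 *	struct nl_cache_ops *ops = nl_cache_ops_lookup("route/link");
 *	struct nl_cache *cache;
 *
 *	if (ops && nl_cache_alloc_and_fill(ops, sock, &cache) == 0) {
 *		printf("cache holds %d objects\n", nl_cache_nitems(cache));
 *		nl_cache_free(cache);
 *	}
 */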

/* Cache modification */
extern int			nl_cache_add(struct nl_cache *,
					     struct nl_object *);
extern int			nl_cache_parse_and_add(struct nl_cache *,
						       struct nl_msg *);
extern void			nl_cache_remove(struct nl_object *);
extern int			nl_cache_refill(struct nl_sock *,
						struct nl_cache *);
extern int			nl_cache_pickup(struct nl_sock *,
						struct nl_cache *);
extern int			nl_cache_resync(struct nl_sock *,
						struct nl_cache *,
						change_func_t);
extern int			nl_cache_include(struct nl_cache *,
						 struct nl_object *,
						 change_func_t);

/* General */
extern int			nl_cache_is_empty(struct nl_cache *);
extern void			nl_cache_mark_all(struct nl_cache *);

/* Dumping */
extern void			nl_cache_dump(struct nl_cache *,
					      struct nl_dump_params *);
extern void			nl_cache_dump_filter(struct nl_cache *,
						     struct nl_dump_params *,
						     struct nl_object *);

/* Iterators */
#ifdef disabled
extern void			nl_cache_foreach(struct nl_cache *,
						 void (*cb)(struct nl_object *,
							    void *),
						 void *arg);
extern void			nl_cache_foreach_filter(struct nl_cache *,
							struct nl_object *,
							void (*cb)(struct
								   nl_object *,
								   void *),
							void *arg);
#endif

/* --- cache management --- */

/* Cache type management */
extern struct nl_cache_ops *	nl_cache_ops_lookup(const char *);
extern struct nl_cache_ops *	nl_cache_ops_associate(int, int);
extern struct nl_msgtype *	nl_msgtype_lookup(struct nl_cache_ops *, int);
extern void			nl_cache_ops_foreach(void (*cb)(struct nl_cache_ops *, void *), void *);
extern int			nl_cache_mngt_register(struct nl_cache_ops *);
extern int			nl_cache_mngt_unregister(struct nl_cache_ops *);

/* Global cache provisioning/requiring */
extern void			nl_cache_mngt_provide(struct nl_cache *);
extern void			nl_cache_mngt_unprovide(struct nl_cache *);
extern struct nl_cache *	nl_cache_mngt_require(const char *);

struct nl_cache_mngr;

#define NL_AUTO_PROVIDE		1
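/* Flags bit for nl_cache_mngr_alloc(): caches added to the manager are
 * automatically made available to others via nl_cache_mngt_provide(). */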

extern int			nl_cache_mngr_alloc(struct nl_sock *,
						    int, int,
						    struct nl_cache_mngr **);
extern int			nl_cache_mngr_add(struct nl_cache_mngr *,
						  const char *,
						  change_func_t,
						  struct nl_cache **);
extern int			nl_cache_mngr_get_fd(struct nl_cache_mngr *);
extern int			nl_cache_mngr_poll(struct nl_cache_mngr *,
						   int);
extern int			nl_cache_mngr_data_ready(struct nl_cache_mngr *);
extern void			nl_cache_mngr_free(struct nl_cache_mngr *);
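
/*
 * Sketch of a cache manager event loop (NETLINK_ROUTE, the "route/link"
 * cache type, and my_change_cb are illustrative; error handling elided):
 *
 *	struct nl_cache_mngr *mngr;
 *	struct nl_cache *cache;
 *
 *	nl_cache_mngr_alloc(sock, NETLINK_ROUTE, NL_AUTO_PROVIDE, &mngr);
 *	nl_cache_mngr_add(mngr, "route/link", my_change_cb, &cache);
 *	for (;;)
 *		nl_cache_mngr_poll(mngr, 1000);	// timeout in ms
 */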

#ifdef __cplusplus
}
#endif

#endif /* NETLINK_CACHE_H_ */