/*
 * NOTE(review): this chunk looks like the Xen hypervisor's x86 <asm/mm.h>
 * (struct pfn_info, PGT_*/PGC_* type flags, get_page()/put_page(),
 * machine_to_phys_mapping), but the text has been corrupted by an
 * extraction/markup-stripping step and is NOT compilable as-is:
 *
 *   1. Physical newlines were collapsed.  Every preprocessor directive here
 *      shares a "line" with the code that followed it, so e.g. the opening
 *      #ifndef swallows everything after it on the same line.  The original
 *      line structure must be restored before this header can be used.
 *
 *   2. All eleven '#include' targets are missing -- presumably the
 *      angle-bracketed header names (<xen/...>, <asm/...>) were eaten by an
 *      HTML/markup stripper.  TODO: recover the include list from the
 *      original source tree; do not guess it.
 *
 *   3. A span of text has been swallowed between the PGT_va_mask definition
 *      and '...u.inuse.domain = (_dom);' below: the tail of the PGT_va_mask
 *      expression, the PGC_* flag definitions, and the header of the
 *      page-sharing macro whose body survives (the do { ... } while ( 0 )
 *      that takes a (_pfn, _dom) pair) are all gone.  Everything from
 *      'PGT_va_mask' to 'while ( 0 )' must be treated as unreliable.
 *
 *   4. The chunk ends mid-declaration ('int do_mmu_update(mmu_upda') -- the
 *      remainder of the file is outside this view.
 *
 * The surviving code is left byte-identical below; only this comment was
 * added.  Restore the file from the pristine original rather than repairing
 * this text by hand.
 */
#ifndef __ASM_X86_MM_H__ #define __ASM_X86_MM_H__ #include #include #include #include #include #include #include #include #include #include #include /* * Per-page-frame information. * * Every architecture must ensure the following: * 1. 'struct pfn_info' contains a 'struct list_head list'. * 2. Provide a PFN_ORDER() macro for accessing the order of a free page. */ #define PFN_ORDER(_pfn) ((_pfn)->u.free.order) struct pfn_info { /* Each frame can be threaded onto a doubly-linked list. */ struct list_head list; /* Reference count and various PGC_xxx flags and fields. */ u32 count_info; /* Context-dependent fields follow... */ union { /* Page is in use: ((count_info & PGC_count_mask) != 0). */ struct { /* Owner of this page (NULL if page is anonymous). */ struct domain *domain; /* Type reference count and various PGT_xxx flags and fields. */ u32 type_info; } inuse; /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */ struct { /* Mask of possibly-tainted TLBs. */ unsigned long cpu_mask; /* Order-size of the free chunk this page is the head of. */ u8 order; } free; } u; /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */ u32 tlbflush_timestamp; }; /* The following page types are MUTUALLY EXCLUSIVE. */ #define PGT_none (0<<29) /* no special uses of this page */ #define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */ #define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */ #define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */ #define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */ #define PGT_gdt_page (5<<29) /* using this page in a GDT? */ #define PGT_ldt_page (6<<29) /* using this page in an LDT? */ #define PGT_writable_page (7<<29) /* has writable mappings of this page? */ #define PGT_type_mask (7<<29) /* Bits 29-31. */ /* Has this page been validated for use as its current type? 
*/ #define _PGT_validated 28 #define PGT_validated (1U<<_PGT_validated) /* Owning guest has pinned this page to its current type? */ #define _PGT_pinned 27 #define PGT_pinned (1U<<_PGT_pinned) /* The 10 most significant bits of virt address if this is a page table. */ #define PGT_va_shift 17 #define PGT_va_mask (((1U<<10)-1)<u.inuse.domain = (_dom); \ /* The incremented type count is intended to pin to 'writable'. */ \ (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1; \ wmb(); /* install valid domain ptr before updating refcnt. */ \ spin_lock(&(_dom)->page_alloc_lock); \ /* _dom holds an allocation reference */ \ ASSERT((_pfn)->count_info == 0); \ (_pfn)->count_info |= PGC_allocated | 1; \ if ( unlikely((_dom)->xenheap_pages++ == 0) ) \ get_knownalive_domain(_dom); \ list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list); \ spin_unlock(&(_dom)->page_alloc_lock); \ } while ( 0 ) #define INVALID_P2M_ENTRY (~0UL) extern struct pfn_info *frame_table; extern unsigned long frame_table_size; extern unsigned long max_page; void init_frametable(void *frametable_vstart, unsigned long nr_pages); int alloc_page_type(struct pfn_info *page, unsigned int type); void free_page_type(struct pfn_info *page, unsigned int type); static inline void put_page(struct pfn_info *page) { u32 nx, x, y = page->count_info; do { x = y; nx = x - 1; } while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) ); if ( unlikely((nx & PGC_count_mask) == 0) ) free_domheap_page(page); } static inline int get_page(struct pfn_info *page, struct domain *domain) { u32 x, nx, y = page->count_info; struct domain *d, *nd = page->u.inuse.domain; do { x = y; nx = x + 1; d = nd; if ( unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */ unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */ unlikely(d != domain) ) /* Wrong owner? 
*/ { DPRINTK("Error pfn %08lx: ed=%p, sd=%p, caf=%08x, taf=%08x\n", page_to_pfn(page), domain, d, x, page->u.inuse.type_info); return 0; } __asm__ __volatile__( LOCK_PREFIX "cmpxchg8b %3" : "=d" (nd), "=a" (y), "=c" (d), "=m" (*(volatile u64 *)(&page->count_info)) : "0" (d), "1" (x), "c" (d), "b" (nx) ); } while ( unlikely(nd != d) || unlikely(y != x) ); return 1; } void put_page_type(struct pfn_info *page); int get_page_type(struct pfn_info *page, u32 type); static inline void put_page_and_type(struct pfn_info *page) { put_page_type(page); put_page(page); } static inline int get_page_and_type(struct pfn_info *page, struct domain *domain, u32 type) { int rc = get_page(page, domain); if ( likely(rc) && unlikely(!get_page_type(page, type)) ) { put_page(page); rc = 0; } return rc; } #define ASSERT_PAGE_IS_TYPE(_p, _t) \ ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \ ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0) #define ASSERT_PAGE_IS_DOMAIN(_p, _d) \ ASSERT(((_p)->count_info & PGC_count_mask) != 0); \ ASSERT((_p)->u.inuse.domain == (_d)) int check_descriptor(unsigned long *d); /* * Use currently-executing domain's pagetables on the specified CPUs. * i.e., stop borrowing someone else's tables if you are the idle domain. */ void synchronise_pagetables(unsigned long cpu_mask); /* * The MPT (machine->physical mapping table) is an array of word-sized * values, indexed on machine frame number. It is expected that guest OSes * will use it to store a "physical" frame number to give the appearance of * contiguous (or near contiguous) physical memory. */ #undef machine_to_phys_mapping #ifdef __x86_64__ extern unsigned long *machine_to_phys_mapping; #else #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START) #endif /* Part of the domain API. */ int do_mmu_update(mmu_upda
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -7,6 +7,7 @@
  * Licence: GPL
  */
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
@@ -211,13 +212,14 @@ static void block2mtd_free_device(struct
 
 
 /* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname, int timeout)
 {
 	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
-	struct block_device *bdev;
+	struct block_device *bdev = ERR_PTR(-ENODEV);
 	struct block2mtd_dev *dev;
 	struct mtd_partition *part;
 	char *name;
+	int i;
 
 	if (!devname)
 		return NULL;
@@ -228,15 +230,20 @@ static struct block2mtd_dev *add_device(
 
 	/* Get a handle on the device */
 	bdev = blkdev_get_by_path(devname, mode, dev);
+
 #ifndef MODULE
-	if (IS_ERR(bdev)) {
+	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+		dev_t devt;
 
-		/* We might not have rootfs mounted at this point. Try
-		   to resolve the device name by other means. */
+		if (i)
+			msleep(1000);
+		wait_for_device_probe();
+
+		devt = name_to_dev_t(devname);
+		if (!devt)
+			continue;
 
-		dev_t devt = name_to_dev_t(devname);
-		if (devt)
-			bdev = blkdev_get_by_dev(devt, mode, dev);
+		bdev = blkdev_get_by_dev(devt, mode, dev);
 	}
 #endif
 
@@ -360,11 +367,12 @@ static char block2mtd_paramline[80 + 12]
 
 static int block2mtd_setup2(const char *val)
 {
-	char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+	char buf[80 + 12 + 80 + 8]; /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
 	char *str = buf;
-	char *token[3];
+	char *token[4];
 	char *name;
 	size_t erase_size = PAGE_SIZE;
+	unsigned long timeout = 0;
 	int i, ret;
 
 	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -373,7 +381,7 @@ static int block2mtd_setup2(const char *
 	strcpy(str, val);
 	kill_final_newline(str);
 
-	for (i = 0; i < 3; i++)
+	for (i = 0; i < 4; i++)
 		token[i] = strsep(&str, ",");
 
 	if (str)
@@ -395,7 +403,10 @@ static int block2mtd_setup2(const char *
 	if (token[2] && (strlen(token[2]) + 1 > 80))
 		parse_err("mtd device name too long");
 
-	add_device(name, erase_size, token[2]);
+	if (token[3] && kstrtoul(token[3], 0, &timeout))
+		parse_err("invalid timeout");
+
+	add_device(name, erase_size, token[2], timeout);
 
 	return 0;
 }
@@ -429,7 +440,7 @@ static int block2mtd_setup(const char *v
 
 
 module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
 
 static int __init block2mtd_init(void)
 {
@@ -462,7 +473,7 @@ static void block2mtd_exit(void)
 }
 
 
-module_init(block2mtd_init);
+late_initcall(block2mtd_init);
 module_exit(block2mtd_exit);
 
 MODULE_LICENSE("GPL");