/* linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h */

#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */

#include <xen/features.h>
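/*
 * Out-of-line helpers that flip the writability of page-table pages.
 * The feature argument (e.g. XENFEAT_writable_page_tables) presumably
 * lets them no-op when the hypervisor handles writable page tables
 * itself.
 */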
void make_page_readonly(void *va, unsigned int feature);
void make_page_writable(void *va, unsigned int feature);
void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
void make_pages_writable(void *va, unsigned int nr, unsigned int feature);

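/*
 * Xen/x86_64 keeps two PGDs per mm in contiguous pages: the kernel one
 * and a user one (used while running in user mode).  __user_pgd() turns
 * a pointer into the kernel PGD into the matching slot in the user PGD,
 * one page (PTRS_PER_PGD entries) further on.
 */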
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

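/*
 * Hook a kernel pte page into a pmd.  The pte page has already been
 * made read-only by pte_alloc_one_kernel(), so set_pmd() is all that
 * is needed.
 */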
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
}

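/*
 * Hook a pte page into a user pmd.  If this mm's page tables are
 * pinned (registered read-only with Xen), the new pte page must be
 * made read-only before it is linked in; for an unpinned mm a plain
 * store suffices and validation happens later, at pin time.
 */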
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
	} else {
		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
	}
}

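/*
 * As pmd_populate(), one level up: the pmd page goes read-only first
 * if the mm is pinned.
 */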
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			       (unsigned long)pmd,
			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, 
				       PAGE_KERNEL_RO), 0));
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
	} else {
		*(pud) =  __pud(_PAGE_TABLE | __pa(pmd));
	}
}

/*
 * We ought to use batch mode here, but pgd_populate() won't be
 * called frequently.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	if (unlikely((mm)->context.pinned)) {
		BUG_ON(HYPERVISOR_update_va_mapping(
			       (unsigned long)pud,
			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, 
				       PAGE_KERNEL_RO), 0));
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
	} else {
		*(pgd) =  __pgd(_PAGE_TABLE | __pa(pud));
		*(__user_pgd(pgd)) = *(pgd);
	}
}

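/*
 * pte_alloc_one() and pte_free() are defined out of line.  The pmd and
 * pud allocators below simply reuse them, since a page table at any
 * level occupies exactly one page.
 */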
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free(struct page *pte);

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pg;

	pg = pte_alloc_one(mm, addr);
	return pg ? page_address(pg) : NULL;
}

static inline void pmd_free(pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pte_free(virt_to_page(pmd));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pg;

	pg = pte_alloc_one(mm, addr);
	return pg ? page_address(pg) : NULL;
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	pte_free(virt_to_page(pud));
}

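/*
 * All pgd pages are kept on a list threaded through struct page:
 * page->index holds the next page, and page->private points back at
 * whatever references this page (pgd_list itself or the previous
 * entry's index field), making deletion O(1).  pgd_lock protects
 * the list.
 */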
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	page->index = (pgoff_t)pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long)&page->index;
	pgd_list = page;
	page->private = (unsigned long)&pgd_list;
	spin_unlock(&pgd_lock);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	next = (struct page *)page->index;
	pprev = (struct page **)page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long)pprev;
	spin_unlock(&pgd_lock);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/*
	 * Allocate two contiguous pages: the kernel PGD followed by
	 * the user PGD (see __user_pgd()).
	 */
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
	/*
	 * Set level3_user_pgt for vsyscall area
	 */
	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
		__pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE));
	return pgd;
}

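/*
 * If either half of the pgd pair is still read-only, it was pinned:
 * unpin it and restore writability before the pages are freed.
 */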
static inline void pgd_free(pgd_t *pgd)
{
	pte_t *ptep = virt_to_ptep(pgd);

	if (!pte_write(*ptep)) {
		xen_pgd_unpin(__pa(pgd));
		BUG_ON(HYPERVISOR_update_va_mapping(
			       (unsigned long)pgd,
			       pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
			       0));
	}

	ptep = virt_to_ptep(__user_pgd(pgd));

	if (!pte_write(*ptep)) {
		xen_pgd_unpin(__pa(__user_pgd(pgd)));
		BUG_ON(HYPERVISOR_update_va_mapping(
			       (unsigned long)__user_pgd(pgd),
			       pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, 
				       PAGE_KERNEL),
			       0));
	}

	pgd_list_del(pgd);
	free_pages((unsigned long)pgd, 1);
}

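/*
 * Kernel pte pages are made read-only as soon as they are allocated,
 * unless the writable-page-tables feature makes that unnecessary.
 */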
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		make_page_readonly(pte, XENFEAT_writable_page_tables);

	return pte;
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	make_page_writable(pte, XENFEAT_writable_page_tables);
	free_page((unsigned long)pte); 
}

#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
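
#if 0
/*
 * Illustrative sketch only, not part of the original header: roughly
 * how the allocators and populate helpers compose when growing an
 * address space.  The function name and the minimal error handling
 * are hypothetical.
 */
static inline int example_grow_page_table(struct mm_struct *mm,
					  unsigned long addr)
{
	pud_t *pud = pud_alloc_one(mm, addr);
	pmd_t *pmd = pmd_alloc_one(mm, addr);
	struct page *pte = pte_alloc_one(mm, addr);

	if (!pud || !pmd || !pte)
		return -ENOMEM;	/* real code would free what succeeded */

	/* Hook each level into its parent, top down. */
	pgd_populate(mm, pgd_offset(mm, addr), pud);
	pud_populate(mm, pud + pud_index(addr), pmd);
	pmd_populate(mm, pmd + pmd_index(addr), pte);
	return 0;
}
#endif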

#endif /* _X86_64_PGALLOC_H */