.. _setheaders:

Set Headers
===========

This feature lets you specify a set of headers to be added to requests or
responses, based on a filter pattern. You can specify these either on the
command-line, or through an interactive editor in mitmproxy.

Example: Set the **Host** header to "example.com" for all requests.

.. code-block:: none

    mitmdump -R http://example.com --setheader :~q:Host:example.com

================== =======================
command-line       ``--setheader PATTERN``
mitmproxy shortcut :kbd:`o` then :kbd:`H`
================== =======================
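As a further, hypothetical illustration (this exact invocation is an
assumption, not taken from the example above): reusing the same
``:PATTERN:HEADER:VALUE`` syntax with mitmproxy's ``~s`` response filter
should stamp a header onto responses rather than requests.

.. code-block:: none

    # hypothetical invocation, shown only to illustrate the pattern syntax
    mitmdump -R http://example.com --setheader :~s:X-Proxied-By:mitmproxy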
path: root/xen/arch/ia64/xen/xenmem.c
/*
 * Xen memory allocator routines
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 * Copyright (C) 2005 Intel Corp.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <xen/mm.h>

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#include <linux/efi.h>
#include <asm/pgalloc.h>

extern unsigned long frametable_pg_dir[];

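/*
 * Offsets into the three-level page table rooted at frametable_pg_dir.
 * Each level holds (1UL << (PAGE_SHIFT - 3)) eight-byte entries; the lower
 * levels store machine addresses, hence the __va() before dereferencing.
 */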
#define FRAMETABLE_PGD_OFFSET(ADDR) \
	(frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
	((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
	__va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
	((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
	(pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
	((1UL << (PAGE_SHIFT - 3)) - 1)))

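/* Number of pages allocated so far for the table being built.  The
 * "contig_mem" boot parameter forces the contiguous (non-virtual) layout. */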
static unsigned long table_size;
static int opt_contig_mem = 0;
boolean_param("contig_mem", opt_contig_mem);
#else
#define opt_contig_mem 1
#endif

struct page_info *frame_table __read_mostly;
unsigned long max_page;

volatile unsigned long *mpt_table __read_mostly;

/*
 * Set up the page tables.
 */
void __init
paging_init (void)
{
	unsigned int mpt_order;
	unsigned long mpt_table_size;
	unsigned long i;

	if (!opt_contig_mem) {
		/* mpt_table is already allocated at this point. */
		return;
	}

	/* Create the machine-to-physical mapping table.
	 * NOTE: as with the frame table, we may later need a virtually
	 * mapped mpt table if large holes exist.  Also, MAX_ORDER in
	 * common code, which only supports 16M so far, needs to be
	 * changed.
	 */
	mpt_table_size = max_page * sizeof(unsigned long);
	mpt_order = get_order(mpt_table_size);
	ASSERT(mpt_order <= MAX_ORDER);
	if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
		panic("Not enough memory to bootstrap Xen.\n");

	printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
	       "mpt_order %u max_page 0x%lx\n",
	       (u64)mpt_table, mpt_table_size, mpt_order, max_page);
	for (i = 0;
	     i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
	     i++) {
		mpt_table[i] = INVALID_M2P_ENTRY;
	}
}

#ifdef CONFIG_VIRTUAL_FRAME_TABLE

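/*
 * Allocate and zero one page for an intermediate level of the virtual
 * frame table; returns its machine address, to be stored in the parent
 * directory entry.
 */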
static unsigned long __init
alloc_dir_page(void)
{
	unsigned long mfn = alloc_boot_pages(1, 1);
	unsigned long dir;
	if (!mfn)
		panic("Not enough memory for virtual frame table!\n");
	++table_size;
	dir = mfn << PAGE_SHIFT;
	clear_page(__va(dir));
	return dir;
}

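/*
 * Allocate one leaf page, fill every unsigned long slot with 'fill', and
 * return its machine frame number (for use with pfn_pte()).
 */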
static inline unsigned long __init
alloc_table_page(unsigned long fill)
{
	unsigned long mfn = alloc_boot_pages(1, 1);
	unsigned long *table;
	unsigned long i;
	if (!mfn)
		panic("Not enough memory for virtual frame table!\n");
	++table_size;
	table = (unsigned long *)__va((mfn << PAGE_SHIFT));
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
	    table[i] = fill;
	return mfn;
}

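/*
 * Populate the three-level table rooted at frametable_pg_dir for the
 * virtual range [start_page, end_page), allocating any missing directory
 * levels and backing each page with a freshly filled leaf page.
 */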
static void __init
create_page_table(unsigned long start_page, unsigned long end_page,
                  unsigned long fill)
{
	unsigned long address;
	unsigned long *dir;
	pte_t *pteptr;

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		dir = FRAMETABLE_PGD_OFFSET(address);
		if (!*dir)
			*dir = alloc_dir_page();
		dir = FRAMETABLE_PMD_OFFSET(*dir, address);
		if (!*dir)
			*dir = alloc_dir_page();
		pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
		if (pte_none(*pteptr))
			set_pte(pteptr, pfn_pte(alloc_table_page(fill),
			                        PAGE_KERNEL));
	}
}

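/*
 * efi_memmap_walk() callback: back the slice of frame_table that describes
 * the physical range [start, end) with zero-filled pages.
 */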
static int __init
create_frametable_page_table (u64 start, u64 end, void *arg)
{
	struct page_info *map_start, *map_end;
	unsigned long start_page, end_page;

	map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
	map_end   = frame_table + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);

	create_page_table(start_page, end_page, 0L);
	return 0;
}

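/*
 * efi_memmap_walk() callback: back the slice of mpt_table that describes
 * the physical range [start, end) with pages pre-filled with
 * INVALID_M2P_ENTRY.
 */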
static int __init
create_mpttable_page_table (u64 start, u64 end, void *arg)
{
	unsigned long map_start, map_end;
	unsigned long start_page, end_page;

	map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
	map_end   = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));

	start_page = map_start & PAGE_MASK;
	end_page = PAGE_ALIGN(map_end);

	create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
	return 0;
}

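/*
 * Build the virtually mapped frame_table and mpt_table by walking the EFI
 * memory map, so only the slices covering real memory are backed by pages.
 */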
void __init init_virtual_frametable(void)
{
	/* Allocate virtual frame_table */
	frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
	table_size = 0;
	efi_memmap_walk(create_frametable_page_table, NULL);

	printk("size of virtual frame_table: %lukB\n",
	       ((table_size << PAGE_SHIFT) >> 10));

	/* Allocate virtual mpt_table */
	table_size = 0;
	mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
	efi_memmap_walk(create_mpttable_page_table, NULL);

	printk("virtual machine to physical table: %p size: %lukB\n"
	       "max_page: 0x%lx\n",
	       mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);

	/*
	 * XXX workaround for translate_domain_pte().
	 * It returns mfn=0 when the machine page isn't present.  This
	 * behavior is a workaround for memory-mapped I/O where no device
	 * is assigned.  Xen might access the page_info for mfn=0, so it
	 * must be guaranteed to exist; otherwise Xen panics with a TLB
	 * miss fault in its virtual address area.
	 *
	 * Once translate_domain_pte() is fixed, this will be removed.
	 */
	if (!mfn_valid(0)) {
		printk("allocating frame table/mpt table at mfn 0.\n");
		create_frametable_page_table(0, PAGE_SIZE, NULL);
		create_mpttable_page_table(0, PAGE_SIZE, NULL);
	}
}

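/*
 * With a virtual frame table a page_info entry may sit in an unmapped hole,
 * so probe both its first and its last byte before trusting it.
 */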
int
ia64_mfn_valid (unsigned long pfn)
{
	extern long ia64_frametable_probe(unsigned long);
	struct page_info *pg;
	int valid;

	if (opt_contig_mem)
		return 1;
	pg = mfn_to_page(pfn);
	valid = ia64_frametable_probe((unsigned long)pg);
	/* also probe the end so the whole page_info struct is covered */
	if (valid)
		valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
	return valid;
}

EXPORT_SYMBOL(ia64_mfn_valid);

#endif /* CONFIG_VIRTUAL_FRAME_TABLE */

/* FIXME: support for machines with big holes between physical memories is
 * postponed.  The current hack only allows EFI memory descriptors up to the
 * 4G boundary. (See efi.c)
 */
#define FT_ALIGN_SIZE	(16UL << 20)
void __init init_frametable(void)
{
	unsigned long pfn;
	unsigned long frame_table_size;

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
	if (!opt_contig_mem) {
		init_virtual_frametable();
		return;
	}
#endif

	frame_table_size = max_page * sizeof(struct page_info);
	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* Request a contiguous chunk from the boot allocator, since the HV
	 * address space is identity mapped */
	pfn = alloc_boot_pages(
            frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
	if (pfn == 0)
		panic("Not enough memory for frame table.\n");

	frame_table = __va(pfn << PAGE_SHIFT);
	memset(frame_table, 0, frame_table_size);
	printk("size of frame_table: %lukB\n",
		frame_table_size >> 10);
}