path: root/xen/arch/ia64/regionreg.c
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *	Bret Mckee (bret.mckee@hp.com)
 *
 */


#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>


#define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
#define	IA64_MAX_IMPL_RID_BITS	24

#define MIN_RIDS	(1 << IA64_MIN_IMPL_RID_BITS)
#define	MIN_RID_MAX	(MIN_RIDS - 1)
#define	MIN_RID_MASK	(MIN_RIDS - 1)
#define	MAX_RIDS	(1 << (IA64_MAX_IMPL_RID_BITS))
#define	MAX_RID		(MAX_RIDS - 1)
#define	MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS
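
// Worked example, assuming IA64_MIN_IMPL_RID_BITS == 18 (its real value
// is IA64_MIN_IMPL_RID_MSB+1, taken from the header):
//   MIN_RIDS       = 1 << 18 = 0x40000 rids per block
//   MAX_RIDS       = 1 << 24 = 0x1000000 rids total
//   MAX_RID_BLOCKS = 1 << (24-18) = 64 blocks of rid space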

// This is the one global memory representation of the default Xen region reg
ia64_rr xen_rr;

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
	    unsigned long r;
	    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
	    return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
	    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

// use this to allocate a rid out of the "Xen reserved rid block";
// returns -1 when the block is exhausted
unsigned long allocate_reserved_rid(void)
{
	static unsigned long currentrid = XEN_DEFAULT_RID;
	unsigned long max = RIDS_PER_RIDBLOCK;

	if (currentrid >= max) return -1UL;
	return currentrid++;
}


// returns -1 if no reserved rid is available
unsigned long allocate_metaphysical_rr0(void)
{
	ia64_rr rrv;
	unsigned long rid = allocate_reserved_rid();

	if (rid == -1UL) return -1UL;
	rrv.rrval = 0;	// don't leave reserved bits uninitialized
	rrv.rid = rid;
	rrv.ps = PAGE_SHIFT;
	rrv.ve = 0;
	return rrv.rrval;
}

int deallocate_metaphysical_rid(unsigned long rid)
{
	// fix this when the incrementing allocation mechanism above is fixed.
	return 1;
}


void init_rr(void)
{
	xen_rr.rrval = 0;
	xen_rr.ve = 0;
	xen_rr.rid = allocate_reserved_rid();
	xen_rr.ps = PAGE_SHIFT;

	printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
}

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void get_impl_rid_bits(void)
{
	// FIXME (call PAL)
//#ifdef CONFIG_MCKINLEY
	implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
//#else
//#error "rid ranges won't work on Merced"
//#endif
	if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
	    implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
		BUG();
}
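
// NB: the PAL call the FIXME above refers to is presumably
// PAL_VM_SUMMARY, which reports the implemented rid width.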


/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 *  "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
	int i, j, n_rid_blocks;

	if (implemented_rid_bits == 0) get_impl_rid_bits();

	if (ridbits >= IA64_MAX_IMPL_RID_BITS)
		ridbits = IA64_MAX_IMPL_RID_BITS - 1;

	if (ridbits < IA64_MIN_IMPL_RID_BITS)
		ridbits = IA64_MIN_IMPL_RID_BITS;

	// convert to rid_blocks and find a free, aligned run of them
	n_rid_blocks = 1 << (ridbits - IA64_MIN_IMPL_RID_BITS);

	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
		if (ridblock_owner[i] != NULL) continue;
		for (j = i; j < i + n_rid_blocks; ++j) {
			if (ridblock_owner[j]) break;
		}
		if (j == i + n_rid_blocks) break;	// whole run is free
	}

	if (i >= MAX_RID_BLOCKS) return 0;

	// found an unused run:
	//   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
	// mark this run as owned
	for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;

	// setup domain struct
	d->rid_bits = ridbits;
	d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
	d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
	printf("###allocating rid_range, domain %p: starting_rid=%lx, "
	       "ending_rid=%lx\n", d, d->starting_rid, d->ending_rid);

	return 1;
}
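
// Example (with the 18/24-bit split assumed earlier): the first caller
// requesting ridbits == IA64_MIN_IMPL_RID_BITS gets block 1 (block 0 is
// reserved), i.e. starting_rid = 1 << 18 = 0x40000, ending_rid = 0x80000.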


int deallocate_rid_range(struct domain *d)
{
	int i;
	int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
	int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;

	return 1;  // KLUDGE ALERT: rid deallocation is disabled for now

	// not all domains will have allocated RIDs (physical mode loaders for instance)
	if (d->rid_bits == 0) return 1;

#ifdef DEBUG
	for (i = rid_block_start; i < rid_block_end; ++i)
		ASSERT(ridblock_owner[i] == d);
#endif

	for (i = rid_block_start; i < rid_block_end; ++i)
		ridblock_owner[i] = NULL;
	
	d->rid_bits = 0;
	d->starting_rid = 0;
	d->ending_rid = 0;
	return 1;
}


// This function is purely for performance... apparently scrambling
//  bits in the region id makes for better hashing, which means better
//  use of the VHPT, which means better performance
// Note that the only time a RID should be mangled is when it is stored in
//  a region register; anytime it is "viewable" outside of this module,
//  it should be unmangled

// This appears to work in Xen, so mangling is enabled here.
#define CONFIG_MANGLE_RIDS
#ifdef CONFIG_MANGLE_RIDS
static inline unsigned long
vmMangleRID(unsigned long RIDVal)
{
	union bits64 { unsigned char bytes[sizeof(unsigned long)]; unsigned long uint; };

	union bits64 t;
	unsigned char tmp;

	t.uint = RIDVal;
	tmp = t.bytes[1];
	t.bytes[1] = t.bytes[3];
	t.bytes[3] = tmp;

	return t.uint;
}
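
// Worked example (Xen/ia64 runs little-endian): an rr value of 0x139
// (rid=1, ps=14, ve=1) has bytes 39 01 00 00; swapping bytes 1 and 3
// gives 0x01000039, so consecutive rids land far apart in the VHPT hash.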

// since vmMangleRID is symmetric, use it for unmangling also
#define vmUnmangleRID(x)	vmMangleRID(x)
#else
// no mangling/unmangling
#define vmMangleRID(x)	(x)
#define vmUnmangleRID(x) (x)
#endif

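// wrappers that mangle the RID on its way into the hardware register;
// the _no_srlz variant leaves the required ia64_srlz_d to the caller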
static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
{
	ia64_set_rr(rr, vmMangleRID(rrval));
}

void
set_rr(unsigned long rr, unsigned long rrval)
{
	ia64_set_rr(rr, vmMangleRID(rrval));
	ia64_srlz_d();
}

unsigned long
get_rr(unsigned long rr)
{
	return vmUnmangleRID(ia64_get_rr(rr));
}

static inline int validate_page_size(unsigned long ps)
{
	switch(ps) {
	    case 12: case 13: case 14: case 16: case 18:
	    case 20: case 22: case 24: case 26: case 28:
		return 1;
	    default:
		return 0;
	}
}
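
// The accepted values are the ia64 architected page sizes:
// 4K, 8K, 16K, 64K, 256K, 1M, 4M, 16M, 64M and 256M (1 << ps bytes).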

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
	struct vcpu *v = current;
	unsigned long rreg = REGION_NUMBER(rr);
	ia64_rr rrv, newrrv;
	unsigned long newrid;

	if (val == -1) return 1;

	rrv.rrval = val;
	newrrv.rrval = 0;
	newrid = v->domain->starting_rid + rrv.rid;

	if (newrid > v->domain->ending_rid) {
		printk("can't set rr%lx to %lx, starting_rid=%lx, "
			"ending_rid=%lx, val=%lx\n", rreg, newrid,
			v->domain->starting_rid, v->domain->ending_rid, val);
		return 0;
	}

	if (rreg == 7) {
		newrrv.rid = newrid;
		newrrv.ve = VHPT_ENABLED_REGION_7;
		newrrv.ps = IA64_GRANULE_SHIFT;
		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
	}
	else {
		newrrv.rid = newrid;
		// FIXME? region 6 needs to be uncached for EFI to work,
		// so give it the same VHPT enable setting as region 7
		if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
		else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
		newrrv.ps = PAGE_SHIFT;
		if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
		set_rr(rr,newrrv.rrval);
	}
	return 1;
}
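
// Example: a domain whose range starts at 0x40000 writing virtual rid 5
// to a region register ends up with physical rid 0x40005, keeping each
// domain's rids disjoint as long as it stays within its allocation.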

// set rr0 to the vcpu's metaphysical rid (metaphysical mode, so the
// domain's rid offset is not applied)
int set_metaphysical_rr0(void)
{
	struct vcpu *v = current;
	ia64_rr rrv;

//	rrv.ve = 1;	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
	set_rr(0, v->arch.metaphysical_rr0);
	return 1;
}

// validates/changes region registers 0-7 in the currently executing domain
// Note that this is the one and only SP API (other than executing a privop)
// for a domain to use to change region registers
int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
		     u64 rr4, u64 rr5, u64 rr6, u64 rr7)
{
	if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
	if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
	if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
	if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
	if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
	if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
	if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
	if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
	return 1;
}

void init_all_rr(struct vcpu *v)
{
	ia64_rr rrv;

	rrv.rrval = v->domain->arch.metaphysical_rr0;
	rrv.ps = PAGE_SHIFT;
	rrv.ve = 1;
	if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
	v->vcpu_info->arch.rrs[0] = -1;
	v->vcpu_info->arch.rrs[1] = rrv.rrval;
	v->vcpu_info->arch.rrs[2] = rrv.rrval;
	v->vcpu_info->arch.rrs[3] = rrv.rrval;
	v->vcpu_info->arch.rrs[4] = rrv.rrval;
	v->vcpu_info->arch.rrs[5] = rrv.rrval;
	rrv.ve = 0; 
	v->vcpu_info->arch.rrs[6] = rrv.rrval;
//	v->shared_info->arch.rrs[7] = rrv.rrval;
}


/* XEN/ia64 INTERNAL ROUTINES */

unsigned long physicalize_rid(struct vcpu *v, unsigned long rrval)
{
	ia64_rr rrv;
	    
	rrv.rrval = rrval;
	rrv.rid += v->domain->starting_rid;
	return rrv.rrval;
}

unsigned long
virtualize_rid(struct vcpu *v, unsigned long rrval)
{
	ia64_rr rrv;
	    
	rrv.rrval = rrval;
	rrv.rid -= v->domain->starting_rid;
	return rrv.rrval;
}
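
// NB: physicalize_rid and virtualize_rid are inverses of one another
// as long as the rid field stays within the domain's allocated range.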

// loads a vcpu's region register (0-7) state into the real physical
// region registers.  rr7 is special: changing it requires a transition
// to assembly and physical mode, which set_one_rr handles via
// ia64_new_rr7.  Panics the domain if any register cannot be set;
// otherwise returns 0.
//
unsigned long load_region_regs(struct vcpu *v)
{
	unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
	// TODO: These probably should be validated
	unsigned long bad = 0;

	if (v->vcpu_info->arch.metaphysical_mode) {
		ia64_rr rrv;

		rrv.rrval = 0;
		rrv.rid = v->domain->arch.metaphysical_rr0;
		rrv.ps = PAGE_SHIFT;
		rrv.ve = 1;
		rr0 = rrv.rrval;
		set_rr_no_srlz(0x0000000000000000L, rr0);
		ia64_srlz_d();
	}
	else {
		rr0 =  v->vcpu_info->arch.rrs[0];
		if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
	}
	rr1 =  v->vcpu_info->arch.rrs[1];
	rr2 =  v->vcpu_info->arch.rrs[2];
	rr3 =  v->vcpu_info->arch.rrs[3];
	rr4 =  v->vcpu_info->arch.rrs[4];
	rr5 =  v->vcpu_info->arch.rrs[5];
	rr6 =  v->vcpu_info->arch.rrs[6];
	rr7 =  v->vcpu_info->arch.rrs[7];
	if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
	if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
	if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
	if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
	if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
	if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
	if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
	if (bad) {
		panic_domain(0, "load_region_regs: can't set! bad=%lx\n", bad);
	}
	return 0;
}