/******************************************************************************
* domain_page.h
*
* Allow temporary mapping of domain pages. Based on ideas from the
* Linux PKMAP code -- the copyrights and credits are retained below.
*/
/*
* (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
* Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de *
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#include <xeno/config.h>
#include <xeno/sched.h>
#include <xeno/mm.h>
#include <asm/domain_page.h>
#include <asm/pgalloc.h>
/* Per-CPU rotating cursor: index of the most recently allocated mapcache slot. */
static unsigned int map_idx[NR_CPUS];
/* Use a spare PTE bit to mark entries ready for recycling. */
#define READY_FOR_TLB_FLUSH (1<<10)
static void flush_all_ready_maps(void)
{
unsigned long *cache = mapcache[smp_processor_id()];
/* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
local_flush_tlb();
}
/*
 * Map the machine page containing physical address 'pa' into this CPU's
 * private mapcache window and return a virtual address for it.  The
 * returned pointer carries the sub-page offset of 'pa', so it may be used
 * directly.  Pair with unmap_domain_mem() on the SAME CPU -- the slot
 * index is recovered from the VA and applied to the caller's per-CPU
 * cache (see unmap_domain_mem).
 */
void *map_domain_mem(unsigned long pa)
{
    unsigned long va;
    int cpu = smp_processor_id();
    unsigned int idx;
    unsigned long *cache = mapcache[cpu];
    unsigned long flags;
    /* IRQs off: the cursor and cache slots are per-CPU but not IRQ-safe. */
    local_irq_save(flags);
    for ( ; ; )
    {
        /* Advance the rotating cursor (power-of-two wrap). */
        idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1);
        /* On wrap-around, recycle all slots previously marked for flush. */
        if ( idx == 0 ) flush_all_ready_maps();
        /* A zero PTE means the slot is free; otherwise keep scanning.
         * NOTE(review): spins forever if every slot is live -- callers are
         * presumably expected to hold fewer than MAPCACHE_ENTRIES maps. */
        if ( cache[idx] == 0 ) break;
    }
    /* Install the mapping: page frame of 'pa' with hypervisor permissions. */
    cache[idx] = (pa & PAGE_MASK) | PAGE_HYPERVISOR;
    local_irq_restore(flags);
    /* Slot index selects the page; low bits of 'pa' give the offset. */
    va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
    return (void *)va;
}
/*
 * Release a mapping obtained from map_domain_mem().  The slot is not torn
 * down immediately; its PTE is tagged READY_FOR_TLB_FLUSH and reclaimed in
 * bulk by flush_all_ready_maps().  Must run on the CPU that created the
 * mapping, since the slot index is applied to this CPU's mapcache.
 */
void unmap_domain_mem(void *va)
{
    unsigned long addr = (unsigned long)va;
    unsigned int slot = (addr - MAPCACHE_VIRT_START) >> PAGE_SHIFT;

    mapcache[smp_processor_id()][slot] |= READY_FOR_TLB_FLUSH;
}