/******************************************************************************
 * flushtlb.h
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003, K A Fraser
 */

#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/smp.h>

/*
 * Every time the TLB clock passes an "epoch", every CPU's TLB is flushed.
 * Therefore, if the current TLB time and a previously-read timestamp differ
 * in their significant bits (i.e., ~TLBCLOCK_EPOCH_MASK), then the TLB clock
 * has wrapped at least once and every CPU's TLB is guaranteed to have been
 * flushed in the meantime.
 * This allows us to deal gracefully with a bounded (a.k.a. wrapping) clock.
 */
#define TLBCLOCK_EPOCH_MASK ((1U<<16)-1)
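
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * interface): the "epoch" of a stamp is simply its high-order bits, so two
 * stamps agree on their epoch iff they match above the mask. For example,
 * 0x0001ffffU and 0x00020000U are adjacent ticks of the clock but lie in
 * different epochs, so any TLB tainted at the former is guaranteed to have
 * been flushed by the time the clock reads the latter.
 */
static inline u32 tlbclock_epoch(u32 stamp)
{
    return stamp & ~TLBCLOCK_EPOCH_MASK;
}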

/*
 * 'cpu_stamp' is the current timestamp for the CPU we are testing.
 * 'lastuse_stamp' is a timestamp taken when the PFN we are testing was last
 * used for a purpose that may have caused the CPU's TLB to become tainted.
 */
static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    /*
     * Why does this work?
     *  1. XOR sets high-order bits if the stamps are from differing epochs.
     *  2. Subtraction sets high-order bits if 'cpu_stamp > lastuse_stamp'.
     * In either case a flush is unnecessary: we therefore OR the results from
     * (1) and (2), mask the high-order bits, and return the inverse.
     */
    return !(((lastuse_stamp ^ cpu_stamp) | (lastuse_stamp - cpu_stamp)) &
             ~TLBCLOCK_EPOCH_MASK);
}
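
/*
 * Usage sketch (hypothetical caller; 'page->tlbflush_timestamp' is an
 * assumed field name, not defined in this header): before allowing CPU
 * 'cpu' to reuse a frame, flush if that CPU may still hold stale
 * translations from the frame's last use.
 *
 *     if ( NEED_FLUSH(tlbflush_time[cpu], page->tlbflush_timestamp) )
 *         flush_tlb_counted();
 */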

extern u32 tlbflush_clock;
extern u32 tlbflush_time[NR_CPUS];

extern void new_tlbflush_clock_period(void);
extern void write_cr3_counted(unsigned long pa);
extern void flush_tlb_counted(void);
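
/*
 * Implementation sketch (assumed; the real definitions live in the
 * corresponding .c file, 'local_flush_tlb' stands in for the low-level
 * flush primitive, and the unsynchronised clock tick glosses over the
 * real code's concurrency handling): a counted flush ticks the global
 * clock, opens a new period when the epoch bits roll over, performs the
 * hardware flush, and stamps the new time against this CPU.
 */
#if 0 /* illustration only, not compiled */
void flush_tlb_counted(void)
{
    u32 t = ++tlbflush_clock;               /* tick the virtual clock    */
    if ( (t & TLBCLOCK_EPOCH_MASK) == 0 )   /* low bits wrapped to zero? */
        new_tlbflush_clock_period();        /* => a new epoch begins     */
    local_flush_tlb();                      /* the actual hardware flush */
    tlbflush_time[smp_processor_id()] = t;  /* stamp this CPU's flush    */
}
#endif
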
#endif /* __FLUSHTLB_H__ */