xen/include/xen/spinlock.h
#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__

#include <asm/system.h>
#include <asm/spinlock.h>

#ifndef NDEBUG
struct lock_debug {
    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
};
#define _LOCK_DEBUG { -1 }
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
struct lock_debug { };
#define _LOCK_DEBUG { }
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif

#ifdef LOCK_PROFILE

#include <public/sysctl.h>

/*
    Lock profiling (enabled via LOCK_PROFILE):

    Global locks which should be subject to profiling must be declared via
    DEFINE_SPINLOCK.

    For locks embedded in structures, further steps are necessary:
    - the structure definition must include a profile_head with exactly this
      name:

      struct lock_profile_qhead   profile_head;

    - each individual lock subject to profiling has to be initialized
      via

      spin_lock_init_prof(ptr, lock);

      with ptr being a pointer to the containing structure and lock the name
      of the spinlock field

    - each structure has to be added to profiling with

      lock_profile_register_struct(type, ptr, idx, print);

      with:
        type:  the profiling type, e.g. LOCKPROF_TYPE_PERDOM
        ptr:   pointer to the structure
        idx:   index of that structure, e.g. domid
        print: descriptive string like "domain"

    - removing a structure from profiling is done via

      lock_profile_deregister_struct(type, ptr);
*/
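
/*
 * Illustrative sketch of the steps above; "struct foo", foo_init() and
 * foo_destroy() are hypothetical and not part of Xen:
 *
 *   struct foo {
 *       spinlock_t lock;
 *       struct lock_profile_qhead profile_head;
 *       int id;
 *   };
 *
 *   void foo_init(struct foo *f, int id)
 *   {
 *       f->id = id;
 *       spin_lock_init_prof(f, lock);
 *       lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, f, f->id, "foo");
 *   }
 *
 *   void foo_destroy(struct foo *f)
 *   {
 *       lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, f);
 *   }
 */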

struct spinlock;

struct lock_profile {
    struct lock_profile *next;       /* forward link */
    char                *name;       /* lock name */
    struct spinlock     *lock;       /* the lock itself */
    u64                 lock_cnt;    /* # of completed locking ops */
    u64                 block_cnt;   /* # of blocking waits for the lock */
    s64                 time_hold;   /* cumulative time the lock was held */
    s64                 time_block;  /* cumulative time spent waiting */
    s64                 time_locked; /* system time of last locking */
};

struct lock_profile_qhead {
    struct lock_profile_qhead *head_q; /* next head of this type */
    struct lock_profile       *elem_q; /* first element in q */
    int32_t                   idx;     /* index for printout */
};

#define _LOCK_PROFILE(name) { 0, #name, &name, 0, 0, 0, 0, 0 }
#define _LOCK_PROFILE_PTR(name)                                               \
    static struct lock_profile *__lock_profile_##name                         \
    __used_section(".lockprofile.data") =                                     \
    &__lock_profile_data_##name
#define _SPIN_LOCK_UNLOCKED(x) { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0,          \
                                 _LOCK_DEBUG, x }
#define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
#define DEFINE_SPINLOCK(l)                                                    \
    spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
    static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l);    \
    _LOCK_PROFILE_PTR(l)

#define spin_lock_init_prof(s, l)                                             \
    do {                                                                      \
        struct lock_profile *prof;                                            \
        prof = xzalloc(struct lock_profile);                                  \
        if (!prof) break;                                                     \
        prof->name = #l;                                                      \
        prof->lock = &(s)->l;                                                 \
        (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof);                       \
        prof->next = (s)->profile_head.elem_q;                                \
        (s)->profile_head.elem_q = prof;                                      \
    } while(0)

void _lock_profile_register_struct(
    int32_t, struct lock_profile_qhead *, int32_t, char *);
void _lock_profile_deregister_struct(int32_t, struct lock_profile_qhead *);

#define lock_profile_register_struct(type, ptr, idx, print)                   \
    _lock_profile_register_struct(type, &((ptr)->profile_head), idx, print)
#define lock_profile_deregister_struct(type, ptr)                             \
    _lock_profile_deregister_struct(type, &((ptr)->profile_head))

extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
extern void spinlock_profile_printall(unsigned char key);
extern void spinlock_profile_reset(unsigned char key);

#else

struct lock_profile_qhead { };

#define SPIN_LOCK_UNLOCKED                                                    \
    { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED

#define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
#define lock_profile_register_struct(type, ptr, idx, print)
#define lock_profile_deregister_struct(type, ptr)

#endif

typedef struct spinlock {
    raw_spinlock_t raw;
    u16 recurse_cpu:12;
    u16 recurse_cnt:4;
    struct lock_debug debug;
#ifdef LOCK_PROFILE
    struct lock_profile *profile;
#endif
} spinlock_t;


#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
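
/*
 * Illustrative sketch (xyz_lock and "struct xyz" are made-up names): global
 * locks are typically set up statically with DEFINE_SPINLOCK(), while locks
 * embedded in structures are initialized at run time with spin_lock_init():
 *
 *   static DEFINE_SPINLOCK(xyz_lock);
 *
 *   struct xyz {
 *       spinlock_t lock;
 *   };
 *
 *   void xyz_setup(struct xyz *x)
 *   {
 *       spin_lock_init(&x->lock);
 *   }
 */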

typedef struct {
    raw_rwlock_t raw;
    struct lock_debug debug;
} rwlock_t;

#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)

void _spin_lock(spinlock_t *lock);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);

void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

int _spin_is_locked(spinlock_t *lock);
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);

int _spin_trylock_recursive(spinlock_t *lock);
void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);

void _read_lock(rwlock_t *lock);
void _read_lock_irq(rwlock_t *lock);
unsigned long _read_lock_irqsave(rwlock_t *lock);

void _read_unlock(rwlock_t *lock);
void _read_unlock_irq(rwlock_t *lock);
void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
int _read_trylock(rwlock_t *lock);

void _write_lock(rwlock_t *lock);
void _write_lock_irq(rwlock_t *lock);
unsigned long _write_lock_irqsave(rwlock_t *lock);
int _write_trylock(rwlock_t *lock);

void _write_unlock(rwlock_t *lock);
void _write_unlock_irq(rwlock_t *lock);
void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);

int _rw_is_locked(rwlock_t *lock);
int _rw_is_write_locked(rwlock_t *lock);

#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_irq(l)              _spin_lock_irq(l)
#define spin_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _spin_lock_irqsave(l));                          \
    })

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               _spin_trylock(l)

#define spin_trylock_irqsave(lock, flags)       \
({                                              \
    local_irq_save(flags);                      \
    spin_trylock(lock) ?                        \
    1 : ({ local_irq_restore(flags); 0; });     \
})
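
/*
 * Illustrative sketch ("state_lock" and update_state() are made up): the
 * flags argument to spin_lock_irqsave() must be an unsigned long variable
 * (enforced by the BUILD_BUG_ON() above) and is passed back unchanged to
 * spin_unlock_irqrestore():
 *
 *   static DEFINE_SPINLOCK(state_lock);
 *
 *   void update_state(void)
 *   {
 *       unsigned long flags;
 *
 *       spin_lock_irqsave(&state_lock, flags);
 *       ... critical section, also protected against local interrupts ...
 *       spin_unlock_irqrestore(&state_lock, flags);
 *   }
 */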

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)

/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
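
/*
 * Illustrative sketch ("struct foo", foo_update() and foo_flush() are
 * hypothetical): both paths take the lock recursively, so foo_update() may
 * safely call foo_flush() while already holding the lock on the same CPU:
 *
 *   void foo_flush(struct foo *f)
 *   {
 *       spin_lock_recursive(&f->lock);
 *       ...
 *       spin_unlock_recursive(&f->lock);
 *   }
 *
 *   void foo_update(struct foo *f)
 *   {
 *       spin_lock_recursive(&f->lock);
 *       foo_flush(f);
 *       spin_unlock_recursive(&f->lock);
 *   }
 */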

#define read_lock(l)                  _read_lock(l)
#define read_lock_irq(l)              _read_lock_irq(l)
#define read_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _read_lock_irqsave(l));                          \
    })

#define read_unlock(l)                _read_unlock(l)
#define read_unlock_irq(l)            _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
#define read_trylock(l)               _read_trylock(l)

#define write_lock(l)                 _write_lock(l)
#define write_lock_irq(l)             _write_lock_irq(l)
#define write_lock_irqsave(l, f)                                \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _write_lock_irqsave(l));                         \
    })
#define write_trylock(l)              _write_trylock(l)

#define write_unlock(l)               _write_unlock(l)
#define write_unlock_irq(l)           _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)

#define rw_is_locked(l)               _rw_is_locked(l)
#define rw_is_write_locked(l)         _rw_is_write_locked(l)
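
/*
 * Illustrative sketch (tbl_lock, tbl_lookup() and tbl_insert() are made up):
 * multiple readers may hold the lock concurrently, while a writer requires
 * exclusive access:
 *
 *   static DEFINE_RWLOCK(tbl_lock);
 *
 *   int tbl_lookup(int key)
 *   {
 *       int val;
 *
 *       read_lock(&tbl_lock);
 *       val = ...;
 *       read_unlock(&tbl_lock);
 *       return val;
 *   }
 *
 *   void tbl_insert(int key, int val)
 *   {
 *       write_lock(&tbl_lock);
 *       ...
 *       write_unlock(&tbl_lock);
 *   }
 */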

#endif /* __SPINLOCK_H__ */