/******************************************************************************
 * event.h
 *
 * A nice interface for passing asynchronous events to guest OSes.
 *
 * Copyright (c) 2002, K A Fraser
 */

#ifndef __XEN_EVENT_H__
#define __XEN_EVENT_H__

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/bitops.h>

/*
 * EVENT-CHANNEL NOTIFICATIONS
 * NB. On x86, the atomic bit operations also act as memory barriers. There
 * is therefore sufficiently strict ordering for this architecture -- others
 * may require explicit memory barriers.
 */

static inline void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;
    int            running;

    /* These three operations must happen in strict order. */
    if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
         !test_bit        (port,    &s->evtchn_mask[0])    &&
         !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) )
    {
        /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);

        /*
         * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
         * pending flag. These values may fluctuate (after all, we hold no
         * locks) but the key insight is that each change will cause
         * evtchn_upcall_pending to be polled.
 *
         * NB2. We save VCPUF_running across the unblock to avoid a needless
         * IPI for domains that we IPI'd to unblock.
         */
        running = test_bit(_VCPUF_running, &v->vcpu_flags);
        vcpu_unblock(v);
        if ( running )
            smp_send_event_check_cpu(v->processor);
    }
}
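
/*
 * Illustrative sketch (an assumption, not part of this interface): on an
 * architecture whose atomic bit operations are NOT also memory barriers
 * (see the note at the top of this file), the "strict order" above would
 * need explicit fencing between the steps, roughly:
 *
 *     set_bit(port, &s->evtchn_pending[0]);
 *     smp_mb();                                (order pending vs. mask test)
 *     if ( !test_bit(port, &s->evtchn_mask[0]) && ... )
 *         ...
 *
 * On x86 this ordering comes for free from test_and_set_bit().
 */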

/*
 * send_guest_virq:
 *  @v:        VCPU to which virtual IRQ should be sent
 *  @virq:     Virtual IRQ number (VIRQ_*)
 */
static inline void send_guest_virq(struct vcpu *v, int virq)
{
    int port = v->virq_to_evtchn[virq];

    if ( likely(port != 0) )
        evtchn_set_pending(v, port);
}
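
/*
 * Example usage (illustrative only; the caller context is an assumption):
 * a timer implementation might raise the timer VIRQ on the vcpu it fired
 * for. VIRQ_TIMER is the standard virtual-timer IRQ number from the
 * public interface headers.
 *
 *     static void example_timer_fired(struct vcpu *v)
 *     {
 *         send_guest_virq(v, VIRQ_TIMER);
 *     }
 */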

/*
 * send_guest_pirq:
 *  @d:        Domain to which physical IRQ should be sent
 *  @pirq:     Physical IRQ number
 */
extern void send_guest_pirq(struct domain *d, int pirq);
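
/*
 * Example usage (illustrative sketch; the handler is hypothetical -- only
 * send_guest_pirq() itself is declared above): a hardware interrupt bound
 * to a guest is forwarded from the hypervisor's IRQ path.
 *
 *     static void example_do_guest_irq(struct domain *d, int pirq)
 *     {
 *         send_guest_pirq(d, pirq);
 *     }
 */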

#define event_pending(_v)                      \
    ((_v)->vcpu_info->evtchn_upcall_pending && \
     !(_v)->vcpu_info->evtchn_upcall_mask)
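
/*
 * Example usage (illustrative; the blocking primitive is hypothetical):
 * a vcpu should not be descheduled as blocked if an unmasked event is
 * already pending, so a block path would typically test this first.
 *
 *     if ( !event_pending(v) )
 *         vcpu_block(v);
 */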

#endif /* __XEN_EVENT_H__ */