path: root/linux-2.6.11-xen-sparse/include/asm-xen/queues.h
/*
 * Oh dear. Task queues were removed from Linux 2.6 and replaced by work
 * queues. Unfortunately the semantics are not the same. With task queues we
 * can defer work until a particular event occurs -- this is not
 * straightforwardly done with work queues (queued work is performed ASAP, or
 * after some fixed timeout). Conversely, work queues are a (slightly) neater
 * way of deferring work to a process context than using task queues in 2.4.
 *
 * This is a bit of a needless reimplementation -- should have just pulled
 * the code from 2.4, but I tried leveraging work queues to simplify things.
 * They didn't help. :-(
 */
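
/*
 * Rough illustration of that difference (a sketch, not part of the original
 * header; 'my_fn' and 'my_data' are made-up names). With 2.6-era work
 * queues, queued work runs as soon as the worker thread gets to it:
 *
 *   static DECLARE_WORK(my_work, my_fn, my_data);
 *   schedule_work(&my_work);            // runs ASAP in process context
 *
 * With the task-queue interface below, nothing runs until the caller decides
 * the awaited event has occurred and explicitly calls run_task_queue().
 */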

#ifndef __QUEUES_H__
#define __QUEUES_H__

#include <linux/version.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

struct tq_struct {
    void (*fn)(void *);    /* function to run */
    void *arg;             /* argument passed to fn */
    struct list_head list; /* linkage on a task_queue */
    unsigned long pending; /* bit 0 set while queued */
};
#define INIT_TQUEUE(_name, _fn, _arg)               \
    do {                                            \
        INIT_LIST_HEAD(&(_name)->list);             \
        (_name)->pending = 0;                       \
        (_name)->fn = (_fn); (_name)->arg = (_arg); \
    } while ( 0 )
#define DECLARE_TQUEUE(_name, _fn, _arg)            \
    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }

typedef struct {
    struct list_head list; /* queued tq_struct entries */
    spinlock_t       lock; /* protects the list */
} task_queue;
#define DECLARE_TASK_QUEUE(_name) \
    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }

static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
{
    unsigned long flags;
    /* The 'pending' bit guarantees each entry is on at most one queue. */
    if ( test_and_set_bit(0, &tqe->pending) )
        return 0;
    spin_lock_irqsave(&tql->lock, flags);
    list_add_tail(&tqe->list, &tql->list);
    spin_unlock_irqrestore(&tql->lock, flags);
    return 1;
}

static inline void run_task_queue(task_queue *tql)
{
    struct list_head head, *ent;
    struct tq_struct *tqe;
    unsigned long flags;
    void (*fn)(void *);
    void *arg;

    /* Atomically splice the whole queue onto the private list 'head'. */
    spin_lock_irqsave(&tql->lock, flags);
    list_add(&head, &tql->list);
    list_del_init(&tql->list);
    spin_unlock_irqrestore(&tql->lock, flags);

    while ( !list_empty(&head) )
    {
        ent = head.next;
        list_del_init(ent);
        tqe = list_entry(ent, struct tq_struct, list);
        fn  = tqe->fn;
        arg = tqe->arg;
        /* Ensure fn/arg are read before clearing 'pending' allows the
           entry to be requeued (and potentially rewritten). */
        wmb();
        tqe->pending = 0;
        fn(arg);
    }
}

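/*
 * Illustrative usage (a sketch, not part of the original header; the names
 * net_rx_tasks, rx_refill and my_netif are made up):
 *
 *   static void rx_refill(void *data)
 *   {
 *       // replenish receive buffers for the interface passed in 'data'
 *   }
 *
 *   static DECLARE_TASK_QUEUE(net_rx_tasks);
 *   static DECLARE_TQUEUE(rx_refill_task, rx_refill, &my_netif);
 *
 *   // e.g. in an interrupt handler: defer the work (at most one queueing
 *   // per tq_struct is recorded, thanks to the 'pending' bit)
 *   queue_task(&rx_refill_task, &net_rx_tasks);
 *
 *   // later, when the awaited event occurs: run everything queued so far
 *   run_task_queue(&net_rx_tasks);
 */
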
#endif /* __QUEUES_H__ */