aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c
blob: 924221be15dd27e3f1389cf011c042c10a55f084 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

/*-------------------------------------------------------------------------*/

/*
 * OHCI deals with three types of memory:
 *	- data used only by the HCD ... kmalloc is fine
 *	- async and periodic schedules, shared by HC and HCD ... these
 *	  need to use dma_pool or dma_alloc_coherent
 *	- driver buffers, read/written by HC ... the hcd glue or the
 *	  device driver provides us with dma addresses
 *
 * There's also "register" data, which is memory mapped.
 * No memory seen by this driver (or any HCD) may be paged out.
 */

/*-------------------------------------------------------------------------*/

/* One-time software-state setup for a new controller instance;
 * touches no hardware registers. */
static void admhc_hcd_init(struct admhcd *ahcd)
{
	spin_lock_init(&ahcd->lock);
	INIT_LIST_HEAD(&ahcd->pending);
	ahcd->next_statechange = jiffies;
}

/*-------------------------------------------------------------------------*/

/* Create the DMA pools shared between HC and HCD: one for transfer
 * descriptors (TDs), one for endpoint descriptors (EDs).
 * Returns 0 on success or -ENOMEM, leaving no pool allocated on failure. */
static int admhc_mem_init(struct admhcd *ahcd)
{
	struct device *dev = admhcd_to_hcd(ahcd)->self.controller;

	/* TD pool: TD_ALIGN byte alignment, no page-crossing issues */
	ahcd->td_cache = dma_pool_create("admhc_td", dev,
			sizeof(struct td), TD_ALIGN, 0);
	if (ahcd->td_cache == NULL)
		return -ENOMEM;

	/* ED pool: ED_ALIGN byte alignment, no page-crossing issues */
	ahcd->ed_cache = dma_pool_create("admhc_ed", dev,
			sizeof(struct ed), ED_ALIGN, 0);
	if (ahcd->ed_cache == NULL) {
		dma_pool_destroy(ahcd->td_cache);
		ahcd->td_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Destroy both DMA pools (if they exist) and clear the pointers so
 * a repeated call is harmless. */
static void admhc_mem_cleanup(struct admhcd *ahcd)
{
	if (ahcd->ed_cache != NULL) {
		dma_pool_destroy(ahcd->ed_cache);
		ahcd->ed_cache = NULL;
	}

	if (ahcd->td_cache != NULL) {
		dma_pool_destroy(ahcd->td_cache);
		ahcd->td_cache = NULL;
	}
}

/*-------------------------------------------------------------------------*/

/* ahcd "done list" processing needs this mapping */
static inline struct td *dma_to_td(struct admhcd *ahcd, dma_addr_t td_dma)
{
	struct td *td;

	td_dma &= TD_MASK;
	td = ahcd->td_hash[TD_HASH_FUNC(td_dma)];
	while (td && td->td_dma != td_dma)
		td = td->td_hash;

	return td;
}

/* TDs ... */
/* TDs ... */

/* Allocate and zero one TD from the DMA pool, recording its bus
 * address.  Returns NULL on allocation failure. */
static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags)
{
	struct td	*td;
	dma_addr_t	dma;

	td = dma_pool_alloc(ahcd->td_cache, mem_flags, &dma);
	if (td == NULL)
		return NULL;

	memset(td, 0, sizeof(*td));
	td->td_dma = dma;
	/* make it look dead in case the HC fetches it: hwNextTD
	 * points back at this TD itself */
	td->hwNextTD = cpu_to_hc32(ahcd, dma);
	/* hash-chain insertion is deferred to td_fill */

	return td;
}

/* Unhash a TD (if td_fill ever hashed it) and return it to the DMA
 * pool.  A TD marked done that is missing from its hash chain
 * indicates a bookkeeping bug, so log it. */
static void td_free(struct admhcd *ahcd, struct td *td)
{
	struct td **prev = &ahcd->td_hash[TD_HASH_FUNC(td->td_dma)];

	/* walk the chain to the link that points at this TD */
	while (*prev && *prev != td)
		prev = &(*prev)->td_hash;
	if (*prev)
		*prev = td->td_hash;
	else if ((td->flags & TD_FLAG_DONE) != 0)
		admhc_dbg(ahcd, "no hash for td %p\n", td);

	dma_pool_free(ahcd->td_cache, td, td->td_dma);
}

/*-------------------------------------------------------------------------*/

/* EDs ... */
/* EDs ... */

/* Allocate and zero one ED from the DMA pool, record its bus
 * address, and initialize its TD and URB lists.  Returns NULL on
 * allocation failure. */
static struct ed *ed_alloc(struct admhcd *ahcd, gfp_t mem_flags)
{
	struct ed	*ed;
	dma_addr_t	dma;

	ed = dma_pool_alloc(ahcd->ed_cache, mem_flags, &dma);
	if (ed == NULL)
		return NULL;

	memset(ed, 0, sizeof(*ed));
	ed->dma = dma;
	INIT_LIST_HEAD(&ed->td_list);
	INIT_LIST_HEAD(&ed->urb_list);

	return ed;
}

/* Return an ED to its DMA pool.  NOTE(review): presumably the caller
 * must have unlinked the ED from all schedules first — confirm against
 * the callers. */
static void ed_free(struct admhcd *ahcd, struct ed *ed)
{
	dma_pool_free(ahcd->ed_cache, ed, ed->dma);
}

/*-------------------------------------------------------------------------*/

/* URB priv ... */
static void urb_priv_free(struct admhcd *ahcd, struct urb_priv *urb_priv)
{
	int i;

	for (i = 0; i < urb_priv->td_cnt; i++)
		if (urb_priv->td[i])
			td_free(ahcd, urb_priv->td[i]);

	list_del(&urb_priv->pending);
	kfree(urb_priv);
}

/* Allocate an urb_priv together with num_tds TDs.  Returns NULL on
 * allocation failure (any partially-allocated TDs are freed). */
static struct urb_priv *urb_priv_alloc(struct admhcd *ahcd, int num_tds,
		gfp_t mem_flags)
{
	struct urb_priv	*priv;

	/* allocate the private part of the URB; td[] is an array of
	 * TD *pointers*, so size it with sizeof(struct td *) — the
	 * old sizeof(struct td) over-allocated per TD */
	priv = kzalloc(sizeof(*priv) + sizeof(struct td *) * num_tds,
			mem_flags);
	if (!priv)
		goto err;

	/* initialize before any td_alloc can fail: the err_free path
	 * calls urb_priv_free, whose list_del would otherwise operate
	 * on a zeroed (uninitialized) list_head and oops */
	INIT_LIST_HEAD(&priv->pending);

	/* allocate the TDs (deferring hash chain updates) */
	for (priv->td_cnt = 0; priv->td_cnt < num_tds; priv->td_cnt++) {
		priv->td[priv->td_cnt] = td_alloc(ahcd, mem_flags);
		if (priv->td[priv->td_cnt] == NULL)
			goto err_free;
	}

	return priv;

err_free:
	urb_priv_free(ahcd, priv);
err:
	return NULL;
}