path: root/quantum/rgblight.c
blob: b1b0f035d58d48bc53035db4655c04963d385b48
#include <avr/eeprom.h>
#include <avr/interrupt.h>
#include <util/delay.h>
#include "progmem.h"
#include "timer.h"
#include "rgblight.h"
#include "debug.h"

const uint8_t DIM_CURVE[] PROGMEM = {
	0, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
	6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
	8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11,
	11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15,
	15, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20,
	20, 20, 21, 21, 22, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26, 26,
	27, 27, 28, 28, 29, 29, 30, 30, 31, 32, 32, 33, 33, 34, 35, 35,
	36, 36, 37, 38, 38, 39, 40, 40, 41, 42, 43, 43, 44, 45, 46, 47,
	48, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
	63, 64, 65, 66, 68, 69, 70, 71, 73, 74, 75, 76, 78, 79, 81, 82,
	83, 85, 86, 88, 90, 91, 93, 94, 96, 98, 99, 101, 103, 105, 107, 109,
	110, 112, 114, 116, 118, 121, 123, 125, 127, 129, 132, 134, 136, 139, 141, 144,
	146, 149, 151, 154, 157, 159, 162, 165, 168, 171, 174, 177, 180, 183, 186, 190,
	193, 196, 200, 203, 207, 211, 214, 218, 222, 226, 230, 234, 238, 242, 248, 255,
};
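/* One full breathing period: 256 samples of a 0 -> 255 -> 0 ramp, stepped
 * through one entry per tick by rgblight_effect_breathing(). */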
const uint8_t RGBLED_BREATHING_TABLE[] PROGMEM = {0,0,0,0,1,1,1,2,2,3,4,5,5,6,7,9,10,11,12,14,15,17,18,20,21,23,25,27,29,31,33,35,37,40,42,44,47,49,52,54,57,59,62,65,67,70,73,76,79,82,85,88,90,93,97,100,103,106,109,112,115,118,121,124,127,131,134,137,140,143,146,149,152,155,158,162,165,167,170,173,176,179,182,185,188,190,193,196,198,201,203,206,208,211,213,215,218,220,222,224,226,228,230,232,234,235,237,238,240,241,243,244,245,246,248,249,250,250,251,252,253,253,254,254,254,255,255,255,255,255,255,255,254,254,254,253,253,252,251,250,250,249,248,246,245,244,243,241,240,238,237,235,234,232,230,228,226,224,222,220,218,215,213,211,208,206,203,201,198,196,193,190,188,185,182,179,176,173,170,167,165,162,158,155,152,149,146,143,140,137,134,131,128,124,121,118,115,112,109,106,103,100,97,93,90,88,85,82,79,76,73,70,67,65,62,59,57,54,52,49,47,44,42,40,37,35,33,31,29,27,25,23,21,20,18,17,15,14,12,11,10,9,7,6,5,5,4,3,2,2,1,1,1,0,0,0};
const uint8_t RGBLED_BREATHING_INTERVALS[] PROGMEM = {30, 20, 10, 5};
const uint8_t RGBLED_RAINBOW_MOOD_INTERVALS[] PROGMEM = {120, 60, 30};
const uint8_t RGBLED_RAINBOW_SWIRL_INTERVALS[] PROGMEM = {100, 50, 20};
const uint8_t RGBLED_SNAKE_INTERVALS[] PROGMEM = {100, 50, 20};
const uint8_t RGBLED_KNIGHT_INTERVALS[] PROGMEM = {100, 50, 20};
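/* The *_INTERVALS tables hold the per-speed animation update period in
 * milliseconds (each effect compares timer_elapsed() against its entry);
 * the sub-mode derived from the current mode selects which entry is used. */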

rgblight_config_t rgblight_config;
rgblight_config_t inmem_config;
struct cRGB led[RGBLED_NUM];
uint8_t rgblight_inited = 0;
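/* rgblight_config mirrors the packed 32-bit EEPROM word (enable, mode, hue,
 * sat, val; read/written whole via .raw). inmem_config records the color most
 * recently pushed by rgblight_sethsv_noeeprom() -- the path the animations
 * use -- without touching the EEPROM copy. */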


void sethsv(uint16_t hue, uint8_t sat, uint8_t val, struct cRGB *led1) {
	/* Convert hue, saturation and value (HSV, a.k.a. HSB) to RGB.
	 * DIM_CURVE is applied only to value and, inverted, to saturation;
	 * this gives the most natural-looking result. */
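	/* Worked example: hue=0, sat=255, val=255 (unchanged by DIM_CURVE at the
	 * extremes) gives base = 0 and case 0, so r=255, g=0, b=0 (pure red);
	 * hue=120 falls into case 2 and gives g=255 with r=b=0 (pure green). */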
  uint8_t r = 0, g = 0, b = 0;

  val = pgm_read_byte(&DIM_CURVE[val]);
	sat = 255 - pgm_read_byte(&DIM_CURVE[255 - sat]);

	uint8_t base;

	if (sat == 0) { // Achromatic color (gray); hue doesn't matter.
		r = val;
		g = val;
		b = val;
	} else  {
		base = ((255 - sat) * val) >> 8;

		switch (hue / 60) {
		case 0:
			r = val;
			g = (((val - base)*hue) / 60) + base;
			b = base;
			break;

		case 1:
			r = (((val - base)*(60 - (hue % 60))) / 60) + base;
			g = val;
			b = base;
			break;

		case 2:
			r = base;
			g = val;
			b = (((val - base)*(hue % 60)) / 60) + base;
			break;

		case 3:
			r = base;
			g = (((val - base)*(60 - (hue % 60))) / 60) + base;
			b = val;
			break;

		case 4:
			r = (((val - base)*(hue % 60)) / 60) + base;
			g = base;
			b = val;
			break;

		case 5:
			r = val;
			g = base;
			b = (((val - base)*(60 - (hue % 60))) / 60) + base;
			break;
		}
	}
  setrgb(r,g,b, led1);
}

void setrgb(uint8_t r, uint8_t g, uint8_t b, struct cRGB *led1) {
  (*led1).r = r;
  (*led1).g = g;
  (*led1).b = b;
}


uint32_t eeconfig_read_rgblight(void) {
  return eeprom_read_dword(EECONFIG_RGBLIGHT);
}
void eeconfig_update_rgblight(uint32_t val) {
  eeprom_update_dword(EECONFIG_RGBLIGHT, val);
}
void eeconfig_update_rgblight_default(void) {
	dprintf("eeconfig_update_rgblight_default\n");
	rgblight_config.enable = 1;
	rgblight_config.mode = 1;
	rgblight_config.hue = 200;
	rgblight_config.sat = 204;
	rgblight_config.val = 204;
	eeconfig_update_rgblight(rgblight_config.raw);
}
void eeconfig_debug_rgblight(void) {
	dprintf("rgblight_config eprom\n");
	dprintf("rgblight_config.enable = %d\n", rgblight_config.enable);
	dprintf("rghlight_config.mode = %d\n", rgblight_config.mode);
	dprintf("rgblight_config.hue = %d\n", rgblight_config.hue);
	dprintf("rgblight_config.sat = %d\n", rgblight_config.sat);
	dprintf("rgblight_config.val = %d\n", rgblight_config.val);
}

void rgblight_init(void) {
  debug_enable = 1; // Debug ON!
	dprintf("rgblight_init called.\n");
  rgblight_inited = 1;
	dprintf("rgblight_init start!\n");
  if (!eeconfig_is_enabled()) {
		dprintf("rgblight_init eeconfig is not enabled.\n");
    eeconfig_init();
		eeconfig_update_rgblight_default();
  }
  rgblight_config.raw = eeconfig_read_rgblight();
	if (!rgblight_config.mode) {
		dprintf("rgblight_init rgblight_config.mode = 0. Write default values to EEPROM.\n");
		eeconfig_update_rgblight_default();
		rgblight_config.raw = eeconfig_read_rgblight();
	}
	eeconfig_debug_rgblight(); // display current eeprom values

	#if !defined(AUDIO_ENABLE) && defined(RGBLIGHT_TIMER)
		rgblight_timer_init(); // setup the timer
	#endif

  if (rgblight_config.enable) {
    rgblight_mode(rgblight_config.mode);
  }
}

void rgblight_increase(void) {
	uint8_t mode = 0;
  if (rgblight_config.mode < RGBLIGHT_MODES) {
    mode = rgblight_config.mode + 1;
  }
	rgblight_mode(mode);
}

void rgblight_decrease(void) {
	uint8_t mode = 0;
  if (rgblight_config.mode > 1) { // mode is never < 1; if it were, the EEPROM would need to be re-initialized.
    mode = rgblight_config.mode-1;
  }
	rgblight_mode(mode);
}

void rgblight_step(void) {
	uint8_t mode = 0;
  mode = rgblight_config.mode + 1;
  if (mode > RGBLIGHT_MODES) {
    mode = 1;
  }
	rgblight_mode(mode);
}

void rgblight_mode(uint8_t mode) {
	if (!rgblight_config.enable) {
		return;
	}
  if (mode<1) {
		rgblight_config.mode = 1;
	} else if (mode > RGBLIGHT_MODES) {
		rgblight_config.mode = RGBLIGHT_MODES;
	} else {
		rgblight_config.mode = mode;
	}
  eeconfig_update_rgblight(rgblight_config.raw);
  xprintf("rgblight mode: %u\n", rgblight_config.mode);
	if (rgblight_config.mode == 1) {
		#if !defined(AUDIO_ENABLE) && defined(RGBLIGHT_TIMER)
			rgblight_timer_disable();
		#endif
	} else if (rgblight_config.mode >=2 && rgblight_config.mode <=23) {
		// MODE 2-5, breathing
		// MODE 6-8, rainbow mood
		// MODE 9-14, rainbow swirl
		// MODE 15-20, snake
		// MODE 21-23, knight

		#if !defined(AUDIO_ENABLE) && defined(RGBLIGHT_TIMER)
			rgblight_timer_enable();
		#endif
	}
  rgblight_sethsv(rgblight_config.hue, rgblight_config.sat, rgblight_config.val);
}

void rgblight_toggle(void) {
  rgblight_config.enable ^= 1;
  eeconfig_update_rgblight(rgblight_config.raw);
  xprintf("rgblight toggle: rgblight_config.enable = %u\n", rgblight_config.enable);
	if (rgblight_config.enable) {
		rgblight_mode(rgblight_config.mode);
	} else {

		#if !defined(AUDIO_ENABLE) && defined(RGBLIGHT_TIMER)
			rgblight_timer_disable();
		#endif
		_delay_ms(50);
		rgblight_set();
	}
}


void rgblight_increase_hue(void){
	uint16_t hue;
  hue = (rgblight_config.hue+RGBLIGHT_HUE_STEP) % 360;
  rgblight_sethsv(hue, rgblight_config.sat, rgblight_config.val);
}
void rgblight_decrease_hue(void){
	uint16_t hue;
	if (rgblight_config.hue-RGBLIGHT_HUE_STEP <0 ) {
		hue = (rgblight_config.hue+360-RGBLIGHT_HUE_STEP) % 360;
	} else {
		hue = (rgblight_config.hue-RGBLIGHT_HUE_STEP) % 360;
	}
  rgblight_sethsv(hue, rgblight_config.sat, rgblight_config.val);
}
void rgblight_increase_sat(void) {
	uint8_t sat;
  if (rgblight_config.sat + RGBLIGHT_SAT_STEP > 255) {
    sat = 255;
  } else {
    sat = rgblight_config.sat+RGBLIGHT_SAT_STEP;
  }
  rgblight_sethsv(rgblight_config.hue, sat, rgblight_config.val);
}
void rgblight_decrease_sat(void){
	uint8_t sat;
  if (rgblight_config.sat - RGBLIGHT_SAT_STEP < 0) {
    sat = 0;
  } else {
    sat = rgblight_config.sat-RGBLIGHT_SAT_STEP;
  }
  rgblight_sethsv(rgblight_config.hue, sat, rgblight_config.val);
}
void rgblight_increase_val(void){
	uint8_t val;
  if (rgblight_config.val + RGBLIGHT_VAL_STEP > 255) {
    val = 255;
  } else {
    val = rgblight_config.val+RGBLIGHT_VAL_STEP;
  }
  rgblight_sethsv(rgblight_config.hue, rgblight_config.sat, val);
}
void rgblight_decrease_val(void) {
	uint8_t val;
  if (rgblight_config.val - RGBLIGHT_VAL_STEP < 0) {
    val = 0;
  } else {
    val = rgblight_config.val-RGBLIGHT_VAL_STEP;
  }
  rgblight_sethsv(rgblight_config.hue, rgblight_config.sat, val);
}
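
/* These stepping helpers are the usual entry points from a keymap. A minimal
 * sketch (the MY_RGB_* keycodes are hypothetical and this handler belongs in
 * a keymap, not in this file):
 *
 *   bool process_record_user(uint16_t keycode, keyrecord_t *record) {
 *     if (record->event.pressed) {
 *       switch (keycode) {
 *         case MY_RGB_TOG:  rgblight_toggle();       return false;
 *         case MY_RGB_MODE: rgblight_step();         return false;
 *         case MY_RGB_HUI:  rgblight_increase_hue(); return false;
 *       }
 *     }
 *     return true;
 *   }
 */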

void rgblight_sethsv_noeeprom(uint16_t hue, uint8_t sat, uint8_t val){
	inmem_config.raw = rgblight_config.raw;
  if (rgblight_config.enable) {
    struct cRGB tmp_led;
    sethsv(hue, sat, val, &tmp_led);
		inmem_config.hue = hue;
		inmem_config.sat = sat;
		inmem_config.val = val;
    // dprintf("rgblight set hue [MEMORY]: %u,%u,%u\n", inmem_config.hue, inmem_config.sat, inmem_config.val);
    rgblight_setrgb(tmp_led.r, tmp_led.g, tmp_led.b);
  }
}
void rgblight_sethsv(uint16_t hue, uint8_t sat, uint8_t val){
  if (rgblight_config.enable) {
		if (rgblight_config.mode == 1) {
			// mode 1 (static): push the new color to the LEDs immediately
			rgblight_sethsv_noeeprom(hue, sat, val);
		} else {
			// animation modes: only store the new color; the running effect updates the LEDs
			if (rgblight_config.mode >= 2 && rgblight_config.mode <= 5) {
				// breathing: ignore the requested val, keep the in-memory value
				val = rgblight_config.val;
			} else if (rgblight_config.mode >= 6 && rgblight_config.mode <= 14) {
				// rainbow mood and rainbow swirl: ignore the requested hue
				hue = rgblight_config.hue;
			}
		}
		rgblight_config.hue = hue;
		rgblight_config.sat = sat;
		rgblight_config.val = val;
		eeconfig_update_rgblight(rgblight_config.raw);
		xprintf("rgblight set hsv [EEPROM]: %u,%u,%u\n", rgblight_config.hue, rgblight_config.sat, rgblight_config.val);
  }
}

void rgblight_setrgb(uint8_t r, uint8_t g, uint8_t b){
  // dprintf("rgblight set rgb: %u,%u,%u\n", r,g,b);
  for (uint8_t i=0;i<RGBLED_NUM;i++) {
    led[i].r = r;
    led[i].g = g;
    led[i].b = b;
  }
  rgblight_set();

}

void rgblight_set(void) {
	if (rgblight_config.enable) {
		ws2812_setleds(led, RGBLED_NUM);
	} else {
		for (uint8_t i=0;i<RGBLED_NUM;i++) {
	    led[i].r = 0;
	    led[i].g = 0;
	    led[i].b = 0;
	  }
		ws2812_setleds(led, RGBLED_NUM);
	}
}


#if !defined(AUDIO_ENABLE) && defined(RGBLIGHT_TIMER)

// Animation timer -- AVR Timer3
void rgblight_timer_init(void) {
	static uint8_t rgblight_timer_is_init = 0;
	if (rgblight_timer_is_init) {
		return;
	}
	rgblight_timer_is_init = 1;
	/* Timer 3 setup */
	TCCR3B = _BV(WGM32) // CTC mode, OCR3A as TOP
	      | _BV(CS30);  // Clock select: clk/1 (no prescaling)
	/* Set TOP value */
	uint8_t sreg = SREG;
	cli();
	OCR3AH = (RGBLED_TIMER_TOP>>8)&0xff;
	OCR3AL = RGBLED_TIMER_TOP&0xff;
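	/* With CTC mode and a clk/1 prescaler the compare-match interrupt fires
	   every (RGBLED_TIMER_TOP + 1) / F_CPU seconds. For illustration only:
	   a TOP of F_CPU/1000 - 1 would give a 1 ms tick; the actual value of
	   RGBLED_TIMER_TOP is defined elsewhere. */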
	SREG = sreg;
}
void rgblight_timer_enable(void) {
	TIMSK3 |= _BV(OCIE3A);
	dprintf("TIMER3 enabled.\n");
}
void rgblight_timer_disable(void) {
	TIMSK3 &= ~_BV(OCIE3A);
	dprintf("TIMER3 disabled.\n");
}
void rgblight_timer_toggle(void) {
	TIMSK3 ^= _BV(OCIE3A);
	dprintf("TIMER3 toggled.\n");
}
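
// Animation tick: dispatch to the active effect based on the current mode.
// Each effect rate-limits itself with timer_elapsed() against its interval table.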

ISR(TIMER3_COMPA_vect) {
	// Mode = 1, static light, do nothing here
	if (rgblight_config.mode>=2 && rgblight_config.mode<=5) {
		// mode = 2 to 5, breathing mode
		rgblight_effect_breathing(rgblight_config.mode-2);

	} else if (rgblight_config.mode>=6 && rgblight_config.mode<=8) {
		rgblight_effect_rainbow_mood(rgblight_config.mode-6);
	} else if (rgblight_config.mode>=9 && rgblight_config.mode<=14) {
		rgblight_effect_rainbow_swirl(rgblight_config.mode-9);
	} else if (rgblight_config.mode>=15 && rgblight_config.mode<=20) {
		rgblight_effect_snake(rgblight_config.mode-15);
	} else if (rgblight_config.mode>=21 && rgblight_config.mode<=23) {
		rgblight_effect_knight(rgblight_config.mode-21);
	}
}

// effects
void rgblight_effect_breathing(uint8_t interval) {
	static uint8_t pos = 0;
	static uint16_t last_timer = 0;

	if (timer_elapsed(last_timer)<pgm_read_byte(&RGBLED_BREATHING_INTERVALS[interval])) return;
	last_timer = timer_read();

	rgblight_sethsv_noeeprom(rgblight_config.hue, rgblight_config.sat, pgm_read_byte(&RGBLED_BREATHING_TABLE[pos]));
	pos = (pos+1) % 256;
}

void rgblight_effect_rainbow_mood(uint8_t interval) {
	static uint16_t current_hue=0;
	static uint16_t last_timer = 0;

	if (timer_elapsed(last_timer)<pgm_read_byte(&RGBLED_RAINBOW_MOOD_INTERVALS[interval])) return;
	last_timer = timer_read();
	rgblight_sethsv_noeeprom(current_hue, rgblight_config.sat, rgblight_config.val);
	current_hue = (current_hue+1) % 360;
}

void rgblight_effect_rainbow_swirl(uint8_t interval) {
	static uint16_t current_hue=0;
	static uint16_t last_timer = 0;
	uint16_t hue;
	uint8_t i;
	if (timer_elapsed(last_timer)<pgm_read_byte(&RGBLED_RAINBOW_SWIRL_INTERVALS[interval/2])) return;
	last_timer = timer_read();
	for (i=0; i<RGBLED_NUM; i++) {
		hue = (360/RGBLED_NUM*i+current_hue)%360;
		sethsv(hue, rgblight_config.sat, rgblight_config.val, &led[i]);
	}
	rgblight_set();

	if (interval % 2) {
		current_hue = (current_hue+1) % 360;
	} else {
		if (current_hue -1 < 0) {
			current_hue = 359;
		} else {
			current_hue = current_hue - 1;
		}

	}
}
void rgblight_effect_snake(uint8_t interval) {
	static uint8_t pos=0;
	static uint16_t last_timer = 0;
	uint8_t i,j;
	int8_t k;
	int8_t increament = 1;
	if (interval%2) increament = -1;
	if (timer_elapsed(last_timer)<pgm_read_byte(&RGBLED_SNAKE_INTERVALS[interval/2])) return;
	last_timer = timer_read();
	for (i=0;i<RGBLED_NUM;i++) {
		led[i].r=0;
		led[i].g=0;
		led[i].b=0;
		for (j=0;j<RGBLIGHT_EFFECT_SNAKE_LENGTH;j++) {
			k = pos+j*increament;
			if (k<0) k = k+RGBLED_NUM;
			if (i==k) {
				sethsv(rgblight_config.hue, rgblight_config.sat, (uint8_t)(rgblight_config.val*(RGBLIGHT_EFFECT_SNAKE_LENGTH-j)/RGBLIGHT_EFFECT_SNAKE_LENGTH), &led[i]);
			}
		}
	}
	rgblight_set();
	if (increament == 1) {
		if (pos - 1 < 0) {
			pos = RGBLED_NUM-1;
		} else {
			pos -= 1;
		}
	} else {
		pos = (pos+1)%RGBLED_NUM;
	}

}

void rgblight_effect_knight(uint8_t interval) {
	static int8_t pos=0;
	static uint16_t last_timer = 0;
	uint8_t i,j,cur;
	int8_t k;
	struct cRGB preled[RGBLED_NUM];
	static int8_t increament = -1;
	if (timer_elapsed(last_timer)<pgm_read_byte(&RGBLED_KNIGHT_INTERVALS[interval])) return;
	last_timer = timer_read();
	for (i=0;i<RGBLED_NUM;i++) {
		preled[i].r=0;
		preled[i].g=0;
		preled[i].b=0;
		for (j=0;j<RGBLIGHT_EFFECT_KNIGHT_LENGTH;j++) {
			k = pos+j*increament;
			if (k<0) k = 0;
			if (k>=RGBLED_NUM) k=RGBLED_NUM-1;
			if (i==k) {
				sethsv(rgblight_config.hue, rgblight_config.sat, rgblight_config.val, &preled[i]);
			}
		}
	}
	// Copy the frame into led[], rotated by RGBLIGHT_EFFECT_KNIGHT_OFFSET.
	// The copy is needed even with a zero offset, otherwise the frame never
	// reaches the LEDs.
	for (i=0;i<RGBLED_NUM;i++) {
		cur = (i+RGBLIGHT_EFFECT_KNIGHT_OFFSET) % RGBLED_NUM;
		led[i].r = preled[cur].r;
		led[i].g = preled[cur].g;
		led[i].b = preled[cur].b;
	}
	rgblight_set();
	if (increament == 1) {
		if (pos - 1 < 0 - RGBLIGHT_EFFECT_KNIGHT_LENGTH) {
			pos = 0- RGBLIGHT_EFFECT_KNIGHT_LENGTH;
			increament = -1;
		} else {
			pos -= 1;
		}
	} else {
		if (pos+1>RGBLED_NUM+RGBLIGHT_EFFECT_KNIGHT_LENGTH) {
			pos = RGBLED_NUM+RGBLIGHT_EFFECT_KNIGHT_LENGTH-1;
			increament = 1;
		} else {
			pos += 1;
		}
	}

}

#endif