path: root/lib/lufa/Projects/TempDataLogger/Lib/DataflashManager.c
blob: b1111ce39b4fe146c2baa2858e854fcac2be80ae
/*
             LUFA Library
     Copyright (C) Dean Camera, 2017.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2017  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *
 *  Functions to manage the physical Dataflash media, including reading and writing of
 *  blocks of data. These functions are called by the SCSI layer when data must be stored
 *  or retrieved to/from the physical storage media. If a different medium is used (such
 *  as an SD card or EEPROM), functions similar to these will need to be written.
 */

#define  INCLUDE_FROM_DATAFLASHMANAGER_C
#include "DataflashManager.h"

/** Writes blocks (OS blocks, not Dataflash pages) to the storage medium, the board Dataflash IC(s), from
 *  the pre-selected data OUT endpoint. This routine reads in OS sized blocks from the endpoint and writes
 *  them to the Dataflash in Dataflash page sized blocks.
 *
 *  \param[in] MSInterfaceInfo  Pointer to a structure containing a Mass Storage Class configuration and state
 *  \param[in] BlockAddress  Data block starting address for the write sequence
 *  \param[in] TotalBlocks   Number of blocks of data to write
 */
void DataflashManager_WriteBlocks(USB_ClassInfo_MS_Device_t* const MSInterfaceInfo,
                                  const uint32_t BlockAddress,
                                  uint16_t TotalBlocks)
{
	uint16_t CurrDFPage          = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE);
	uint16_t CurrDFPageByte      = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE);
	uint8_t  CurrDFPageByteDiv16 = (CurrDFPageByte >> 4);
	bool     UsingSecondBuffer   = false;

	/* Select the correct starting Dataflash IC for the block requested */
	Dataflash_SelectChipFromPage(CurrDFPage);

#if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE)
	/* Copy selected dataflash's current page contents to the Dataflash buffer */
	Dataflash_SendByte(DF_CMD_MAINMEMTOBUFF1);
	Dataflash_SendAddressBytes(CurrDFPage, 0);
	Dataflash_WaitWhileBusy();
#endif

	/* Send the Dataflash buffer write command */
	Dataflash_SendByte(DF_CMD_BUFF1WRITE);
	Dataflash_SendAddressBytes(0, CurrDFPageByte);

	/* Wait until endpoint is ready before continuing */
	if (Endpoint_WaitUntilReady())
	  return;

	while (TotalBlocks)
	{
		uint8_t BytesInBlockDiv16 = 0;

		/* Write an endpoint packet sized data block to the Dataflash */
		while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4))
		{
			/* Check if the endpoint is currently empty */
			if (!(Endpoint_IsReadWriteAllowed()))
			{
				/* Clear the current endpoint bank */
				Endpoint_ClearOUT();

				/* Wait until the host has sent another packet */
				if (Endpoint_WaitUntilReady())
				  return;
			}

			/* Check if end of Dataflash page reached */
			if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4))
			{
				/* Write the Dataflash buffer contents back to the Dataflash page */
				Dataflash_WaitWhileBusy();
				Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE);
				Dataflash_SendAddressBytes(CurrDFPage, 0);

				/* Reset the Dataflash buffer counter, increment the page counter */
				CurrDFPageByteDiv16 = 0;
				CurrDFPage++;

				/* Once all the Dataflash ICs have had their first buffers filled, switch buffers to maintain throughput */
				if (Dataflash_GetSelectedChip() == DATAFLASH_CHIP_MASK(DATAFLASH_TOTALCHIPS))
				  UsingSecondBuffer = !(UsingSecondBuffer);

				/* Select the next Dataflash chip based on the new Dataflash page index */
				Dataflash_SelectChipFromPage(CurrDFPage);

#if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE)
				/* If less than one Dataflash page of data remains, copy over the existing page to preserve its trailing data */
				if ((TotalBlocks * (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) < (DATAFLASH_PAGE_SIZE >> 4))
				{
					/* Copy selected dataflash's current page contents to the Dataflash buffer */
					Dataflash_WaitWhileBusy();
					Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_MAINMEMTOBUFF2 : DF_CMD_MAINMEMTOBUFF1);
					Dataflash_SendAddressBytes(CurrDFPage, 0);
					Dataflash_WaitWhileBusy();
				}
#endif

				/* Send the Dataflash buffer write command */
				Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2WRITE : DF_CMD_BUFF1WRITE);
				Dataflash_SendAddressBytes(0, 0);
			}

			/* Write one 16-byte chunk of data to the Dataflash */
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());
			Dataflash_SendByte(Endpoint_Read_8());

			/* Increment the 16-byte chunk counter within the current Dataflash page */
			CurrDFPageByteDiv16++;

			/* Increment the 16-byte chunk counter within the current block */
			BytesInBlockDiv16++;

			/* Check if the current command is being aborted by the host */
			if (MSInterfaceInfo->State.IsMassStoreReset)
			  return;
		}

		/* Decrement the blocks remaining counter */
		TotalBlocks--;
	}

	/* Write the Dataflash buffer contents back to the Dataflash page */
	Dataflash_WaitWhileBusy();
	Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE);
	Dataflash_SendAddressBytes(CurrDFPage, 0x00);
	Dataflash_WaitWhileBusy();

	/* If the endpoint is empty, clear it ready for the next packet from the host */
	if (!(Endpoint_IsReadWriteAllowed()))
	  Endpoint_ClearOUT();

	/* Deselect all Dataflash chips */
	Dataflash_DeselectChip();
}
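
/* Note (added commentary, not in the original sources): the UsingSecondBuffer
 * ping-pong above exploits the Dataflash's two internal SRAM buffers. Once
 * every chip in the array has had its first buffer queued for programming,
 * incoming endpoint data is streamed into the alternate buffer while the
 * previous one is still being committed to main memory, letting the USB
 * transfer and the Flash programming overlap rather than serialise.
 */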

/** Reads blocks (OS blocks, not Dataflash pages) from the storage medium, the board Dataflash IC(s), into
 *  the pre-selected data IN endpoint. This routine reads in Dataflash page sized blocks from the Dataflash
 *  and writes them in OS sized blocks to the endpoint.
 *
 *  \param[in] MSInterfaceInfo  Pointer to a structure containing a Mass Storage Class configuration and state
 *  \param[in] BlockAddress  Data block starting address for the read sequence
 *  \param[in] TotalBlocks   Number of blocks of data to read
 */
void DataflashManager_ReadBlocks(USB_ClassInfo_MS_Device_t* const MSInterfaceInfo,
                                 const uint32_t BlockAddress,
                                 uint16_t TotalBlocks)
{
	uint16_t CurrDFPage          = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE);
	uint16_t CurrDFPageByte      = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE);
	uint8_t  CurrDFPageByteDiv16 = (CurrDFPageByte >> 4);

	/* Select the correct starting Dataflash IC for the block requested */
	Dataflash_SelectChipFromPage(CurrDFPage);

	/* Send the Dataflash main memory page read command */
	Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD);
	Dataflash_SendAddressBytes(CurrDFPage, CurrDFPageByte);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);

	/* Wait until endpoint is ready before continuing */
	if (Endpoint_WaitUntilReady())
	  return;

	while (TotalBlocks)
	{
		uint8_t BytesInBlockDiv16 = 0;

		/* Read an endpoint packet sized data block from the Dataflash */
		while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4))
		{
			/* Check if the endpoint is currently full */
			if (!(Endpoint_IsReadWriteAllowed()))
			{
				/* Clear the endpoint bank to send its contents to the host */
				Endpoint_ClearIN();

				/* Wait until the endpoint is ready for more data */
				if (Endpoint_WaitUntilReady())
				  return;
			}

			/* Check if end of Dataflash page reached */
			if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4))
			{
				/* Reset the Dataflash buffer counter, increment the page counter */
				CurrDFPageByteDiv16 = 0;
				CurrDFPage++;

				/* Select the next Dataflash chip based on the new Dataflash page index */
				Dataflash_SelectChipFromPage(CurrDFPage);

				/* Send the Dataflash main memory page read command */
				Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD);
				Dataflash_SendAddressBytes(CurrDFPage, 0);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
			}

			/* Read one 16-byte chunk of data from the Dataflash */
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());
			Endpoint_Write_8(Dataflash_ReceiveByte());

			/* Increment the 16-byte chunk counter within the current Dataflash page */
			CurrDFPageByteDiv16++;

			/* Increment the 16-byte chunk counter within the current block */
			BytesInBlockDiv16++;

			/* Check if the current command is being aborted by the host */
			if (MSInterfaceInfo->State.IsMassStoreReset)
			  return;
		}

		/* Decrement the blocks remaining counter */
		TotalBlocks--;
	}

	/* If the endpoint is full, send its contents to the host */
	if (!(Endpoint_IsReadWriteAllowed()))
	  Endpoint_ClearIN();

	/* Deselect all Dataflash chips */
	Dataflash_DeselectChip();
}
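
#if 0
/* Illustrative sketch only - not part of the original LUFA sources. It shows
 * how a SCSI READ(10)/WRITE(10) handler in the Mass Storage service layer
 * typically decodes the big-endian block address and block count from the
 * received command block and hands off to the endpoint-based routines above.
 * The SwapEndian_*() helpers are standard LUFA endianness utilities; the
 * command block field offsets and the function name used here are assumptions
 * made for the example rather than something defined in this file.
 */
static void Example_SCSI_Command_ReadWrite_10(USB_ClassInfo_MS_Device_t* const MSInterfaceInfo,
                                              const bool IsDataRead)
{
	/* READ(10)/WRITE(10): logical block address in bytes 2-5, block count in bytes 7-8 */
	uint32_t BlockAddress = SwapEndian_32(*(uint32_t*)&MSInterfaceInfo->State.CommandBlock.SCSICommandData[2]);
	uint16_t TotalBlocks  = SwapEndian_16(*(uint16_t*)&MSInterfaceInfo->State.CommandBlock.SCSICommandData[7]);

	if (IsDataRead)
	  DataflashManager_ReadBlocks(MSInterfaceInfo, BlockAddress, TotalBlocks);
	else
	  DataflashManager_WriteBlocks(MSInterfaceInfo, BlockAddress, TotalBlocks);
}
#endif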

/** Writes blocks (OS blocks, not Dataflash pages) to the storage medium, the board Dataflash IC(s), from
 *  the given RAM buffer. This routine reads in OS sized blocks from the buffer and writes them to the
 *  Dataflash in Dataflash page sized blocks. This can be linked to FAT libraries to write files to the
 *  Dataflash.
 *
 *  \param[in] BlockAddress  Data block starting address for the write sequence
 *  \param[in] TotalBlocks   Number of blocks of data to write
 *  \param[in] BufferPtr     Pointer to the data source RAM buffer
 */
void DataflashManager_WriteBlocks_RAM(const uint32_t BlockAddress,
                                      uint16_t TotalBlocks,
                                      const uint8_t* BufferPtr)
{
	uint16_t CurrDFPage          = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE);
	uint16_t CurrDFPageByte      = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE);
	uint8_t  CurrDFPageByteDiv16 = (CurrDFPageByte >> 4);
	bool     UsingSecondBuffer   = false;

	/* Select the correct starting Dataflash IC for the block requested */
	Dataflash_SelectChipFromPage(CurrDFPage);

#if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE)
	/* Copy selected dataflash's current page contents to the Dataflash buffer */
	Dataflash_SendByte(DF_CMD_MAINMEMTOBUFF1);
	Dataflash_SendAddressBytes(CurrDFPage, 0);
	Dataflash_WaitWhileBusy();
#endif

	/* Send the Dataflash buffer write command */
	Dataflash_SendByte(DF_CMD_BUFF1WRITE);
	Dataflash_SendAddressBytes(0, CurrDFPageByte);

	while (TotalBlocks)
	{
		uint8_t BytesInBlockDiv16 = 0;

		/* Write an endpoint packet sized data block to the Dataflash */
		while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4))
		{
			/* Check if end of Dataflash page reached */
			if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4))
			{
				/* Write the Dataflash buffer contents back to the Dataflash page */
				Dataflash_WaitWhileBusy();
				Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE);
				Dataflash_SendAddressBytes(CurrDFPage, 0);

				/* Reset the Dataflash buffer counter, increment the page counter */
				CurrDFPageByteDiv16 = 0;
				CurrDFPage++;

				/* Once all the Dataflash ICs have had their first buffers filled, switch buffers to maintain throughput */
				if (Dataflash_GetSelectedChip() == DATAFLASH_CHIP_MASK(DATAFLASH_TOTALCHIPS))
				  UsingSecondBuffer = !(UsingSecondBuffer);

				/* Select the next Dataflash chip based on the new Dataflash page index */
				Dataflash_SelectChipFromPage(CurrDFPage);

#if (DATAFLASH_PAGE_SIZE > VIRTUAL_MEMORY_BLOCK_SIZE)
				/* If less than one Dataflash page of data remains, copy over the existing page to preserve its trailing data */
				if ((TotalBlocks * (VIRTUAL_MEMORY_BLOCK_SIZE >> 4)) < (DATAFLASH_PAGE_SIZE >> 4))
				{
					/* Copy selected dataflash's current page contents to the Dataflash buffer */
					Dataflash_WaitWhileBusy();
					Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_MAINMEMTOBUFF2 : DF_CMD_MAINMEMTOBUFF1);
					Dataflash_SendAddressBytes(CurrDFPage, 0);
					Dataflash_WaitWhileBusy();
				}
#endif

				/* Send the Dataflash buffer write command */
				Dataflash_ToggleSelectedChipCS();
				Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2WRITE : DF_CMD_BUFF1WRITE);
				Dataflash_SendAddressBytes(0, 0);
			}

			/* Write one 16-byte chunk of data to the Dataflash */
			for (uint8_t ByteNum = 0; ByteNum < 16; ByteNum++)
			  Dataflash_SendByte(*(BufferPtr++));

			/* Increment the 16-byte chunk counter within the current Dataflash page */
			CurrDFPageByteDiv16++;

			/* Increment the 16-byte chunk counter within the current block */
			BytesInBlockDiv16++;
		}

		/* Decrement the blocks remaining counter */
		TotalBlocks--;
	}

	/* Write the Dataflash buffer contents back to the Dataflash page */
	Dataflash_WaitWhileBusy();
	Dataflash_SendByte(UsingSecondBuffer ? DF_CMD_BUFF2TOMAINMEMWITHERASE : DF_CMD_BUFF1TOMAINMEMWITHERASE);
	Dataflash_SendAddressBytes(CurrDFPage, 0x00);
	Dataflash_WaitWhileBusy();

	/* Deselect all Dataflash chips */
	Dataflash_DeselectChip();
}

/** Reads blocks (OS blocks, not Dataflash pages) from the storage medium, the board Dataflash IC(s), into
 *  the preallocated RAM buffer. This routine reads in Dataflash page sized blocks from the Dataflash
 *  and writes them in OS sized blocks to the given buffer. This can be linked to FAT libraries to read
 *  the files stored on the Dataflash.
 *
 *  \param[in] BlockAddress  Data block starting address for the read sequence
 *  \param[in] TotalBlocks   Number of blocks of data to read
 *  \param[out] BufferPtr    Pointer to the data destination RAM buffer
 */
void DataflashManager_ReadBlocks_RAM(const uint32_t BlockAddress,
                                     uint16_t TotalBlocks,
                                     uint8_t* BufferPtr)
{
	uint16_t CurrDFPage          = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) / DATAFLASH_PAGE_SIZE);
	uint16_t CurrDFPageByte      = ((BlockAddress * VIRTUAL_MEMORY_BLOCK_SIZE) % DATAFLASH_PAGE_SIZE);
	uint8_t  CurrDFPageByteDiv16 = (CurrDFPageByte >> 4);

	/* Select the correct starting Dataflash IC for the block requested */
	Dataflash_SelectChipFromPage(CurrDFPage);

	/* Send the Dataflash main memory page read command */
	Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD);
	Dataflash_SendAddressBytes(CurrDFPage, CurrDFPageByte);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);
	Dataflash_SendByte(0x00);

	while (TotalBlocks)
	{
		uint8_t BytesInBlockDiv16 = 0;

		/* Read an endpoint packet sized data block from the Dataflash */
		while (BytesInBlockDiv16 < (VIRTUAL_MEMORY_BLOCK_SIZE >> 4))
		{
			/* Check if end of Dataflash page reached */
			if (CurrDFPageByteDiv16 == (DATAFLASH_PAGE_SIZE >> 4))
			{
				/* Reset the Dataflash buffer counter, increment the page counter */
				CurrDFPageByteDiv16 = 0;
				CurrDFPage++;

				/* Select the next Dataflash chip based on the new Dataflash page index */
				Dataflash_SelectChipFromPage(CurrDFPage);

				/* Send the Dataflash main memory page read command */
				Dataflash_SendByte(DF_CMD_MAINMEMPAGEREAD);
				Dataflash_SendAddressBytes(CurrDFPage, 0);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
				Dataflash_SendByte(0x00);
			}

			/* Read one 16-byte chunk of data from the Dataflash */
			for (uint8_t ByteNum = 0; ByteNum < 16; ByteNum++)
			  *(BufferPtr++) = Dataflash_ReceiveByte();

			/* Increment the 16-byte chunk counter within the current Dataflash page */
			CurrDFPageByteDiv16++;

			/* Increment the 16-byte chunk counter within the current block */
			BytesInBlockDiv16++;
		}

		/* Decrement the blocks remaining counter */
		TotalBlocks--;
	}

	/* Deselect all Dataflash chips */
	Dataflash_DeselectChip();
}
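
#if 0
/* Illustrative sketch only - not part of the original LUFA sources. The
 * RAM-buffer routines above are intended to be hooked into a FAT library's
 * low level disk I/O callbacks; the function names and types below follow
 * the ChaN FatFs diskio interface and are assumptions about that library,
 * shown purely as an example of the glue required.
 */
DRESULT disk_read(BYTE Drive, BYTE* Buffer, DWORD SectorNumber, BYTE SectorCount)
{
	DataflashManager_ReadBlocks_RAM(SectorNumber, SectorCount, Buffer);
	return RES_OK;
}

DRESULT disk_write(BYTE Drive, const BYTE* Buffer, DWORD SectorNumber, BYTE SectorCount)
{
	DataflashManager_WriteBlocks_RAM(SectorNumber, SectorCount, Buffer);
	return RES_OK;
}
#endif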

/** Disables the Dataflash memory write protection bits on the board Dataflash ICs, if enabled. */
void DataflashManager_ResetDataflashProtections(void)
{
	/* Select first Dataflash chip, send the read status register command */
	Dataflash_SelectChip(DATAFLASH_CHIP1);
	Dataflash_SendByte(DF_CMD_GETSTATUS);

	/* Check if sector protection is enabled */
	if (Dataflash_ReceiveByte() & DF_STATUS_SECTORPROTECTION_ON)
	{
		Dataflash_ToggleSelectedChipCS();

		/* Send the commands to disable sector protection */
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[0]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[1]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[2]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[3]);
	}

	/* Select second Dataflash chip (if present on selected board), send read status register command */
	#if (DATAFLASH_TOTALCHIPS == 2)
	Dataflash_SelectChip(DATAFLASH_CHIP2);
	Dataflash_SendByte(DF_CMD_GETSTATUS);

	/* Check if sector protection is enabled */
	if (Dataflash_ReceiveByte() & DF_STATUS_SECTORPROTECTION_ON)
	{
		Dataflash_ToggleSelectedChipCS();

		/* Send the commands to disable sector protection */
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[0]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[1]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[2]);
		Dataflash_SendByte(DF_CMD_SECTORPROTECTIONOFF[3]);
	}
	#endif

	/* Deselect current Dataflash chip */
	Dataflash_DeselectChip();
}

/** Performs a simple test on the attached Dataflash IC(s) to ensure that they are working.
 *
 *  \return Boolean \c true if all media chips are working, \c false otherwise
 */
bool DataflashManager_CheckDataflashOperation(void)
{
	uint8_t ReturnByte;

	/* Test first Dataflash IC is present and responding to commands */
	Dataflash_SelectChip(DATAFLASH_CHIP1);
	Dataflash_SendByte(DF_CMD_READMANUFACTURERDEVICEINFO);
	ReturnByte = Dataflash_ReceiveByte();
	Dataflash_DeselectChip();

	/* If returned data is invalid, fail the command */
	if (ReturnByte != DF_MANUFACTURER_ATMEL)
	  return false;

	#if (DATAFLASH_TOTALCHIPS == 2)
	/* Test second Dataflash IC is present and responding to commands */
	Dataflash_SelectChip(DATAFLASH_CHIP2);
	Dataflash_SendByte(DF_CMD_READMANUFACTURERDEVICEINFO);
	ReturnByte = Dataflash_ReceiveByte();
	Dataflash_DeselectChip();

	/* If returned data is invalid, fail the command */
	if (ReturnByte != DF_MANUFACTURER_ATMEL)
	  return false;
	#endif

	return true;
}
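
#if 0
/* Illustrative sketch only - not part of the original LUFA sources. It shows
 * typical start-of-day use of this module from the application's hardware
 * setup code: Dataflash_Init() is the LUFA board Dataflash driver initialiser
 * (the SPI bus itself is brought up separately), while the surrounding
 * function name is an assumption made for the example.
 */
static void Example_SetupStorage(void)
{
	/* Configure the Dataflash chip select hardware */
	Dataflash_Init();

	/* Clear any sector protection left enabled on the Dataflash ICs */
	DataflashManager_ResetDataflashProtections();

	/* Verify that the Dataflash ICs respond with the expected manufacturer ID */
	if (!(DataflashManager_CheckDataflashOperation()))
	{
		/* Storage missing or faulty - signal the error to the user here */
	}
}
#endif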
span class="n">d->arch.ops->fault(va, regs); } void __update_pagetables(struct vcpu *v) { struct domain *d = v->domain; d->arch.ops->update_pagetables(v); } void __shadow_sync_all(struct domain *d) { d->arch.ops->sync_all(d); } int shadow_remove_all_write_access( struct domain *d, unsigned long readonly_gpfn, unsigned long readonly_gmfn) { return d->arch.ops->remove_all_write_access(d, readonly_gpfn, readonly_gmfn); } int shadow_do_update_va_mapping(unsigned long va, l1_pgentry_t val, struct vcpu *v) { struct domain *d = v->domain; return d->arch.ops->do_update_va_mapping(va, val, v); } struct out_of_sync_entry * shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn, unsigned long mfn) { struct domain *d = v->domain; return d->arch.ops->mark_mfn_out_of_sync(v, gpfn, mfn); } /* * Returns 1 if va's shadow mapping is out-of-sync. * Returns 0 otherwise. */ int __shadow_out_of_sync(struct vcpu *v, unsigned long va) { struct domain *d = v->domain; return d->arch.ops->is_out_of_sync(v, va); } unsigned long gva_to_gpa(unsigned long gva) { struct domain *d = current->domain; return d->arch.ops->gva_to_gpa(gva); } /****************************************************************************/ /****************************************************************************/ #if CONFIG_PAGING_LEVELS >= 4 /* * Convert PAE 3-level page-table to 4-level page-table */ static pagetable_t page_table_convert(struct domain *d) { struct pfn_info *l4page, *l3page; l4_pgentry_t *l4; l3_pgentry_t *l3, *pae_l3; int i; l4page = alloc_domheap_page(NULL); if (l4page == NULL) domain_crash(); l4 = map_domain_page(page_to_pfn(l4page)); memset(l4, 0, PAGE_SIZE); l3page = alloc_domheap_page(NULL); if (l3page == NULL) domain_crash(); l3 = map_domain_page(page_to_pfn(l3page)); memset(l3, 0, PAGE_SIZE); l4[0] = l4e_from_page(l3page, __PAGE_HYPERVISOR); pae_l3 = map_domain_page(pagetable_get_pfn(d->arch.phys_table)); for (i = 0; i < PDP_ENTRIES; i++) { l3[i] = pae_l3[i]; l3e_add_flags(l3[i], 0x67); } unmap_domain_page(l4); unmap_domain_page(l3); return mk_pagetable(page_to_phys(l4page)); } static void alloc_monitor_pagetable(struct vcpu *v) { unsigned long mmfn; l4_pgentry_t *mpl4e; struct pfn_info *mmfn_info; struct domain *d = v->domain; pagetable_t phys_table; ASSERT(!pagetable_get_paddr(v->arch.monitor_table)); /* we should only get called once */ mmfn_info = alloc_domheap_page(NULL); ASSERT( mmfn_info ); mmfn = (unsigned long) (mmfn_info - frame_table); mpl4e = (l4_pgentry_t *) map_domain_page(mmfn); memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE); mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] = l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR); /* map the phys_to_machine map into the per domain Read-Only MPT space */ phys_table = page_table_convert(d); mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = l4e_from_paddr(pagetable_get_paddr(phys_table), __PAGE_HYPERVISOR); v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT); v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e; } static void inline free_shadow_fl1_table(struct domain *d, unsigned long smfn) { l1_pgentry_t *pl1e = map_domain_page(smfn); int i; for (i = 0; i < L1_PAGETABLE_ENTRIES; i++) put_page_from_l1e(pl1e[i], d); } /* * Free l2, l3, l4 shadow tables */ void free_fake_shadow_l2(struct domain *d,unsigned long smfn); static void inline free_shadow_tables(struct domain *d, unsigned long smfn, u32 level) { pgentry_64_t *ple = map_domain_page(smfn); int i, external = shadow_mode_external(d); struct pfn_info *page = &frame_table[smfn]; if 
(d->arch.ops->guest_paging_levels == PAGING_L2) { #if CONFIG_PAGING_LEVELS >=4 for ( i = 0; i < PDP_ENTRIES; i++ ) { if (entry_get_flags(ple[i]) & _PAGE_PRESENT ) free_fake_shadow_l2(d,entry_get_pfn(ple[i])); } page = &frame_table[entry_get_pfn(ple[0])]; free_domheap_pages(page, SL2_ORDER); unmap_domain_page(ple); #endif } else { for ( i = 0; i < PAGETABLE_ENTRIES; i++ ) if ( external || is_guest_l4_slot(i) ) if ( entry_get_flags(ple[i]) & _PAGE_PRESENT ) put_shadow_ref(entry_get_pfn(ple[i])); unmap_domain_page(ple); } } void free_monitor_pagetable(struct vcpu *v) { unsigned long mfn; /* * free monitor_table. * Note: for VMX guest, only BSP need do this free. */ if (!(VMX_DOMAIN(v) && v->vcpu_id)) { mfn = pagetable_get_pfn(v->arch.monitor_table); unmap_domain_page(v->arch.monitor_vtable); free_domheap_page(&frame_table[mfn]); } v->arch.monitor_table = mk_pagetable(0); v->arch.monitor_vtable = 0; } #elif CONFIG_PAGING_LEVELS == 3 static void alloc_monitor_pagetable(struct vcpu *v) { BUG(); /* PAE not implemented yet */ } void free_monitor_pagetable(struct vcpu *v) { BUG(); /* PAE not implemented yet */ } #elif CONFIG_PAGING_LEVELS == 2 static void alloc_monitor_pagetable(struct vcpu *v) { unsigned long mmfn; l2_pgentry_t *mpl2e; struct pfn_info *mmfn_info; struct domain *d = v->domain; ASSERT(pagetable_get_paddr(v->arch.monitor_table) == 0); mmfn_info = alloc_domheap_page(NULL); ASSERT(mmfn_info != NULL); mmfn = page_to_pfn(mmfn_info); mpl2e = (l2_pgentry_t *)map_domain_page(mmfn); memset(mpl2e, 0, PAGE_SIZE); #ifdef __i386__ /* XXX screws x86/64 build */ memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE], HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t)); #endif mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] = l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR); // map the phys_to_machine map into the Read-Only MPT space for this domain mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table), __PAGE_HYPERVISOR); // Don't (yet) have mappings for these... // Don't want to accidentally see the idle_pg_table's linear mapping. // mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty(); mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty(); v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT); v->arch.monitor_vtable = mpl2e; } /* * Free the pages for monitor_table and hl2_table */ void free_monitor_pagetable(struct vcpu *v) { l2_pgentry_t *mpl2e, hl2e, sl2e; unsigned long mfn; ASSERT( pagetable_get_paddr(v->arch.monitor_table) ); mpl2e = v->arch.monitor_vtable; /* * First get the mfn for hl2_table by looking at monitor_table */ hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)]; if ( l2e_get_flags(hl2e) & _PAGE_PRESENT ) { mfn = l2e_get_pfn(hl2e); ASSERT(mfn); put_shadow_ref(mfn); } sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)]; if ( l2e_get_flags(sl2e) & _PAGE_PRESENT ) { mfn = l2e_get_pfn(sl2e); ASSERT(mfn); put_shadow_ref(mfn); } unmap_domain_page(mpl2e); /* * Then free monitor_table. * Note: for VMX guest, only BSP need do this free. 
*/ if (!(VMX_DOMAIN(v) && v->vcpu_id)) { mfn = pagetable_get_pfn(v->arch.monitor_table); unmap_domain_page(v->arch.monitor_vtable); free_domheap_page(&frame_table[mfn]); } v->arch.monitor_table = mk_pagetable(0); v->arch.monitor_vtable = 0; } #endif static void shadow_free_snapshot(struct domain *d, struct out_of_sync_entry *entry) { void *snapshot; if ( entry->snapshot_mfn == SHADOW_SNAPSHOT_ELSEWHERE ) return; // Clear the out_of_sync bit. // clear_bit(_PGC_out_of_sync, &frame_table[entry->gmfn].count_info); // XXX Need to think about how to protect the domain's // information less expensively. // snapshot = map_domain_page(entry->snapshot_mfn); memset(snapshot, 0, PAGE_SIZE); unmap_domain_page(snapshot); put_shadow_ref(entry->snapshot_mfn); } void release_out_of_sync_entry(struct domain *d, struct out_of_sync_entry *entry) { struct pfn_info *page; page = &frame_table[entry->gmfn]; // Decrement ref count of guest & shadow pages // put_page(page); // Only use entries that have low bits clear... // if ( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) ) { put_shadow_ref(entry->writable_pl1e >> PAGE_SHIFT); entry->writable_pl1e = -2; } else ASSERT( entry->writable_pl1e == -1 ); // Free the snapshot // shadow_free_snapshot(d, entry); } static void remove_out_of_sync_entries(struct domain *d, unsigned long gmfn) { struct out_of_sync_entry *entry = d->arch.out_of_sync; struct out_of_sync_entry **prev = &d->arch.out_of_sync; struct out_of_sync_entry *found = NULL; // NB: Be careful not to call something that manipulates this list // while walking it. Collect the results into a separate list // first, then walk that list. // while ( entry ) { if ( entry->gmfn == gmfn ) { // remove from out of sync list *prev = entry->next; // add to found list entry->next = found; found = entry; entry = *prev; continue; } prev = &entry->next; entry = entry->next; } prev = NULL; entry = found; while ( entry ) { release_out_of_sync_entry(d, entry); prev = &entry->next; entry = entry->next; } // Add found list to free list if ( prev ) { *prev = d->arch.out_of_sync_free; d->arch.out_of_sync_free = found; } } static inline void shadow_demote(struct domain *d, unsigned long gpfn, unsigned long gmfn) { if ( !shadow_mode_refcounts(d) ) return; ASSERT(frame_table[gmfn].count_info & PGC_page_table); if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none ) { clear_bit(_PGC_page_table, &frame_table[gmfn].count_info); if ( page_out_of_sync(pfn_to_page(gmfn)) ) { remove_out_of_sync_entries(d, gmfn); } } } static void inline free_shadow_l1_table(struct domain *d, unsigned long smfn) { l1_pgentry_t *pl1e = map_domain_page(smfn); int i; struct pfn_info *spage = pfn_to_page(smfn); u32 min_max = spage->tlbflush_timestamp; int min = SHADOW_MIN(min_max); int max; if (d->arch.ops->guest_paging_levels == PAGING_L2) max = SHADOW_MAX_GUEST32(min_max); else max = SHADOW_MAX(min_max); for ( i = min; i <= max; i++ ) { shadow_put_page_from_l1e(pl1e[i], d); pl1e[i] = l1e_empty(); } unmap_domain_page(pl1e); } static void inline free_shadow_hl2_table(struct domain *d, unsigned long smfn) { l1_pgentry_t *hl2 = map_domain_page(smfn); int i, limit; SH_VVLOG("%s: smfn=%lx freed", __func__, smfn); #ifdef __i386__ if ( shadow_mode_external(d) ) limit = L2_PAGETABLE_ENTRIES; else limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE; #else limit = 0; /* XXX x86/64 XXX */ #endif for ( i = 0; i < limit; i++ ) { if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT ) put_page(pfn_to_page(l1e_get_pfn(hl2[i]))); } unmap_domain_page(hl2); } static void inline 
free_shadow_l2_table(struct domain *d, unsigned long smfn, unsigned int type) { l2_pgentry_t *pl2e = map_domain_page(smfn); int i, external = shadow_mode_external(d); for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) if ( external || is_guest_l2_slot(type, i) ) if ( l2e_get_flags(pl2e[i]) & _PAGE_PRESENT ) put_shadow_ref(l2e_get_pfn(pl2e[i])); if ( (PGT_base_page_table == PGT_l2_page_table) && shadow_mode_translate(d) && !external ) { // free the ref to the hl2 // put_shadow_ref(l2e_get_pfn(pl2e[l2_table_offset(LINEAR_PT_VIRT_START)])); } unmap_domain_page(pl2e); } void free_fake_shadow_l2(struct domain *d, unsigned long smfn) { pgentry_64_t *ple = map_domain_page(smfn); int i; for ( i = 0; i < PAGETABLE_ENTRIES; i = i + 2 ) { if ( entry_get_flags(ple[i]) & _PAGE_PRESENT ) put_shadow_ref(entry_get_pfn(ple[i])); } unmap_domain_page(ple); } void free_shadow_page(unsigned long smfn) { struct pfn_info *page = &frame_table[smfn]; unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask; struct domain *d = page_get_owner(pfn_to_page(gmfn)); unsigned long gpfn = __mfn_to_gpfn(d, gmfn); unsigned long type = page->u.inuse.type_info & PGT_type_mask; SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn); ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) ); #if CONFIG_PAGING_LEVELS >=4 if (type == PGT_fl1_shadow) { unsigned long mfn; mfn = __shadow_status(d, gpfn, PGT_fl1_shadow); if (!mfn) gpfn |= (1UL << 63); } #endif delete_shadow_status(d, gpfn, gmfn, type); switch ( type ) { case PGT_l1_shadow: perfc_decr(shadow_l1_pages); shadow_demote(d, gpfn, gmfn); free_shadow_l1_table(d, smfn); d->arch.shadow_page_count--; break; #if defined (__i386__) case PGT_l2_shadow: perfc_decr(shadow_l2_pages); shadow_demote(d, gpfn, gmfn); free_shadow_l2_table(d, smfn, page->u.inuse.type_info); d->arch.shadow_page_count--; break; case PGT_hl2_shadow: perfc_decr(hl2_table_pages); shadow_demote(d, gpfn, gmfn); free_shadow_hl2_table(d, smfn); d->arch.hl2_page_count--; break; #else case PGT_l2_shadow: case PGT_l3_shadow: case PGT_l4_shadow: shadow_demote(d, gpfn, gmfn); free_shadow_tables(d, smfn, shadow_type_to_level(type)); d->arch.shadow_page_count--; break; case PGT_fl1_shadow: free_shadow_fl1_table(d, smfn); d->arch.shadow_page_count--; break; #endif case PGT_snapshot: perfc_decr(apshot_pages); break; default: printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n", page_to_pfn(page), page->u.inuse.type_info); break; } // No TLB flushes are needed the next time this page gets allocated. // page->tlbflush_timestamp = 0; page->u.free.cpumask = CPU_MASK_NONE; if ( type == PGT_l1_shadow ) { list_add(&page->list, &d->arch.free_shadow_frames); perfc_incr(free_l1_pages); } else free_domheap_page(page); } static void free_writable_pte_predictions(struct domain *d) { int i; struct shadow_status *x; for ( i = 0; i < shadow_ht_buckets; i++ ) { u32 count; unsigned long *gpfn_list; /* Skip empty buckets. 
*/ if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 ) continue; count = 0; for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred ) count++; gpfn_list = xmalloc_array(unsigned long, count); count = 0; for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred ) gpfn_list[count++] = x->gpfn_and_flags & PGT_mfn_mask; while ( count ) { count--; delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred); } xfree(gpfn_list); } } static void free_shadow_ht_entries(struct domain *d) { struct shadow_status *x, *n; SH_VLOG("freed tables count=%d l1=%d l2=%d", d->arch.shadow_page_count, perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages)); n = d->arch.shadow_ht_extras; while ( (x = n) != NULL ) { d->arch.shadow_extras_count--; n = *((struct shadow_status **)(&x[shadow_ht_extra_size])); xfree(x); } d->arch.shadow_ht_extras = NULL; d->arch.shadow_ht_free = NULL; ASSERT(d->arch.shadow_extras_count == 0); SH_LOG("freed extras, now %d", d->arch.shadow_extras_count); if ( d->arch.shadow_dirty_bitmap != NULL ) { xfree(d->arch.shadow_dirty_bitmap); d->arch.shadow_dirty_bitmap = 0; d->arch.shadow_dirty_bitmap_size = 0; } xfree(d->arch.shadow_ht); d->arch.shadow_ht = NULL; } static void free_out_of_sync_entries(struct domain *d) { struct out_of_sync_entry *x, *n; n = d->arch.out_of_sync_extras; while ( (x = n) != NULL ) { d->arch.out_of_sync_extras_count--; n = *((struct out_of_sync_entry **)(&x[out_of_sync_extra_size])); xfree(x); } d->arch.out_of_sync_extras = NULL; d->arch.out_of_sync_free = NULL; d->arch.out_of_sync = NULL; ASSERT(d->arch.out_of_sync_extras_count == 0); FSH_LOG("freed extra out_of_sync entries, now %d", d->arch.out_of_sync_extras_count); } void free_shadow_pages(struct domain *d) { int i; struct shadow_status *x; struct vcpu *v; /* * WARNING! The shadow page table must not currently be in use! * e.g., You are expected to have paused the domain and synchronized CR3. */ if( !d->arch.shadow_ht ) return; shadow_audit(d, 1); // first, remove any outstanding refs from out_of_sync entries... // free_out_of_sync_state(d); // second, remove any outstanding refs from v->arch.shadow_table // and CR3. // for_each_vcpu(d, v) { if ( pagetable_get_paddr(v->arch.shadow_table) ) { put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table)); v->arch.shadow_table = mk_pagetable(0); } if ( v->arch.monitor_shadow_ref ) { put_shadow_ref(v->arch.monitor_shadow_ref); v->arch.monitor_shadow_ref = 0; } } #if defined (__i386__) // For external shadows, remove the monitor table's refs // if ( shadow_mode_external(d) ) { for_each_vcpu(d, v) { l2_pgentry_t *mpl2e = v->arch.monitor_vtable; if ( mpl2e ) { l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)]; l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)]; if ( l2e_get_flags(hl2e) & _PAGE_PRESENT ) { put_shadow_ref(l2e_get_pfn(hl2e)); mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty(); } if ( l2e_get_flags(smfn) & _PAGE_PRESENT ) { put_shadow_ref(l2e_get_pfn(smfn)); mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty(); } } } } #endif // Now, the only refs to shadow pages that are left are from the shadow // pages themselves. We just unpin the pinned pages, and the rest // should automatically disappear. // // NB: Beware: each explicitly or implicit call to free_shadow_page // can/will result in the hash bucket getting rewritten out from // under us... 
First, collect the list of pinned pages, then // free them. // for ( i = 0; i < shadow_ht_buckets; i++ ) { u32 count; unsigned long *mfn_list; /* Skip empty buckets. */ if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 ) continue; count = 0; for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) if ( MFN_PINNED(x->smfn) ) count++; if ( !count ) continue; mfn_list = xmalloc_array(unsigned long, count); count = 0; for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next ) if ( MFN_PINNED(x->smfn) ) mfn_list[count++] = x->smfn; while ( count ) { shadow_unpin(mfn_list[--count]); } xfree(mfn_list); } // Now free the pre-zero'ed pages from the domain // struct list_head *list_ent, *tmp; list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames) { list_del(list_ent); perfc_decr(free_l1_pages); struct pfn_info *page = list_entry(list_ent, struct pfn_info, list); if (d->arch.ops->guest_paging_levels == PAGING_L2) { #if CONFIG_PAGING_LEVELS >=4 free_domheap_pages(page, SL1_ORDER); #else free_domheap_page(page); #endif } else free_domheap_page(page); } shadow_audit(d, 0); SH_LOG("Free shadow table."); } void __shadow_mode_disable(struct domain *d) { if ( unlikely(!shadow_mode_enabled(d)) ) return; free_shadow_pages(d); free_writable_pte_predictions(d); #ifndef NDEBUG int i; for ( i = 0; i < shadow_ht_buckets; i++ ) { if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 ) { printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%lx\n", __FILE__, i, d->arch.shadow_ht[i].gpfn_and_flags); BUG(); } } #endif d->arch.shadow_mode = 0; free_shadow_ht_entries(d); free_out_of_sync_entries(d); struct vcpu *v; for_each_vcpu(d, v) { update_pagetables(v); } } static void free_p2m_table(struct domain *d) { // uh, this needs some work... :) BUG(); } int __shadow_mode_enable(struct domain *d, unsigned int mode) { struct vcpu *v; int new_modes = (mode & ~d->arch.shadow_mode); // Gotta be adding something to call this function. ASSERT(new_modes); // can't take anything away by calling this function. ASSERT(!(d->arch.shadow_mode & ~mode)); #if defined(CONFIG_PAGING_LEVELS) if(!shadow_set_guest_paging_levels(d, CONFIG_PAGING_LEVELS)) { printk("Unsupported guest paging levels\n"); domain_crash_synchronous(); /* need to take a clean path */ } #endif for_each_vcpu(d, v) { invalidate_shadow_ldt(v); // We need to set these up for __update_pagetables(). // See the comment there. 
/* * arch.guest_vtable */ if ( v->arch.guest_vtable && (v->arch.guest_vtable != __linear_l2_table) ) { unmap_domain_page(v->arch.guest_vtable); } if ( (mode & (SHM_translate | SHM_external)) == SHM_translate ) v->arch.guest_vtable = __linear_l2_table; else v->arch.guest_vtable = NULL; /* * arch.shadow_vtable */ if ( v->arch.shadow_vtable && (v->arch.shadow_vtable != __shadow_linear_l2_table) ) { unmap_domain_page(v->arch.shadow_vtable); } if ( !(mode & SHM_external) && d->arch.ops->guest_paging_levels == 2) v->arch.shadow_vtable = __shadow_linear_l2_table; else v->arch.shadow_vtable = NULL; #if defined (__i386__) /* * arch.hl2_vtable */ if ( v->arch.hl2_vtable && (v->arch.hl2_vtable != __linear_hl2_table) ) { unmap_domain_page(v->arch.hl2_vtable); } if ( (mode & (SHM_translate | SHM_external)) == SHM_translate ) v->arch.hl2_vtable = __linear_hl2_table; else v->arch.hl2_vtable = NULL; #endif /* * arch.monitor_table & arch.monitor_vtable */ if ( v->arch.monitor_vtable ) { free_monitor_pagetable(v); } if ( mode & SHM_external ) { alloc_monitor_pagetable(v); } } if ( new_modes & SHM_enable ) { ASSERT( !d->arch.shadow_ht ); d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets); if ( d->arch.shadow_ht == NULL ) goto nomem; memset(d->arch.shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status)); } if ( new_modes & SHM_log_dirty ) { ASSERT( !d->arch.shadow_dirty_bitmap ); d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63; d->arch.shadow_dirty_bitmap = xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size / (8 * sizeof(unsigned long))); if ( d->arch.shadow_dirty_bitmap == NULL ) { d->arch.shadow_dirty_bitmap_size = 0; goto nomem; } memset(d->arch.shadow_dirty_bitmap, 0, d->arch.shadow_dirty_bitmap_size/8); } if ( new_modes & SHM_translate ) { if ( !(new_modes & SHM_external) ) { ASSERT( !pagetable_get_paddr(d->arch.phys_table) ); if ( !alloc_p2m_table(d) ) { printk("alloc_p2m_table failed (out-of-memory?)\n"); goto nomem; } } else { // external guests provide their own memory for their P2M maps. // ASSERT( d == page_get_owner( &frame_table[pagetable_get_pfn(d->arch.phys_table)]) ); } } printk("audit1\n"); _audit_domain(d, AUDIT_SHADOW_ALREADY_LOCKED | AUDIT_ERRORS_OK); printk("audit1 done\n"); // Get rid of any shadow pages from any previous shadow mode. // free_shadow_pages(d); printk("audit2\n"); _audit_domain(d, AUDIT_SHADOW_ALREADY_LOCKED | AUDIT_ERRORS_OK); printk("audit2 done\n"); /* * Tear down it's counts by disassembling its page-table-based ref counts. * Also remove CR3's gcount/tcount. * That leaves things like GDTs and LDTs and external refs in tact. * * Most pages will be writable tcount=0. * Some will still be L1 tcount=0 or L2 tcount=0. * Maybe some pages will be type none tcount=0. * Pages granted external writable refs (via grant tables?) will * still have a non-zero tcount. That's OK. * * gcounts will generally be 1 for PGC_allocated. * GDTs and LDTs will have additional gcounts. * Any grant-table based refs will still be in the gcount. * * We attempt to grab writable refs to each page (thus setting its type). * Immediately put back those type refs. * * Assert that no pages are left with L1/L2/L3/L4 type. 
*/ audit_adjust_pgtables(d, -1, 1); d->arch.shadow_mode = mode; if ( shadow_mode_refcounts(d) ) { struct list_head *list_ent = d->page_list.next; while ( list_ent != &d->page_list ) { struct pfn_info *page = list_entry(list_ent, struct pfn_info, list); if ( !get_page_type(page, PGT_writable_page) ) BUG(); put_page_type(page); /* * We use tlbflush_timestamp as back pointer to smfn, and need to * clean up it. */ if ( shadow_mode_external(d) ) page->tlbflush_timestamp = 0; list_ent = page->list.next; } } audit_adjust_pgtables(d, 1, 1); printk("audit3\n"); _audit_domain(d, AUDIT_SHADOW_ALREADY_LOCKED | AUDIT_ERRORS_OK); printk("audit3 done\n"); return 0; nomem: if ( (new_modes & SHM_enable) ) { xfree(d->arch.shadow_ht); d->arch.shadow_ht = NULL; } if ( (new_modes & SHM_log_dirty) ) { xfree(d->arch.shadow_dirty_bitmap); d->arch.shadow_dirty_bitmap = NULL; } if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) && pagetable_get_paddr(d->arch.phys_table) ) { free_p2m_table(d); } return -ENOMEM; } int shadow_mode_enable(struct domain *d, unsigned int mode) { int rc; shadow_lock(d); rc = __shadow_mode_enable(d, mode); shadow_unlock(d); return rc; } static int shadow_mode_table_op( struct domain *d, dom0_shadow_control_t *sc) { unsigned int op = sc->op; int i, rc = 0; struct vcpu *v; ASSERT(shadow_lock_is_acquired(d)); SH_VLOG("shadow mode table op %lx %lx count %d", (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.guest_table), /* XXX SMP */ (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.shadow_table), /* XXX SMP */ d->arch.shadow_page_count); shadow_audit(d, 1); switch ( op ) { case DOM0_SHADOW_CONTROL_OP_FLUSH: free_shadow_pages(d); d->arch.shadow_fault_count = 0; d->arch.shadow_dirty_count = 0; d->arch.shadow_dirty_net_count = 0; d->arch.shadow_dirty_block_count = 0; break; case DOM0_SHADOW_CONTROL_OP_CLEAN: free_shadow_pages(d); sc->stats.fault_count = d->arch.shadow_fault_count; sc->stats.dirty_count = d->arch.shadow_dirty_count; sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count; sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count; d->arch.shadow_fault_count = 0; d->arch.shadow_dirty_count = 0; d->arch.shadow_dirty_net_count = 0; d->arch.shadow_dirty_block_count = 0; if ( (d->max_pages > sc->pages) || (sc->dirty_bitmap == NULL) || (d->arch.shadow_dirty_bitmap == NULL) ) { rc = -EINVAL; break; } sc->pages = d->max_pages; #define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */ for ( i = 0; i < d->max_pages; i += chunk ) { int bytes = ((((d->max_pages - i) > chunk) ? chunk : (d->max_pages - i)) + 7) / 8; if (copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), bytes)) { // copy_to_user can fail when copying to guest app memory. 
// app should zero buffer after mallocing, and pin it rc = -EINVAL; memset( d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))), 0, (d->max_pages/8) - (i/(8*sizeof(unsigned long)))); break; } memset( d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))), 0, bytes); } break; case DOM0_SHADOW_CONTROL_OP_PEEK: sc->stats.fault_count = d->arch.shadow_fault_count; sc->stats.dirty_count = d->arch.shadow_dirty_count; sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count; sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count; if ( (d->max_pages > sc->pages) || (sc->dirty_bitmap == NULL) || (d->arch.shadow_dirty_bitmap == NULL) ) { rc = -EINVAL; break; } sc->pages = d->max_pages; if (copy_to_user( sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8)) { rc = -EINVAL; break; } break; default: rc = -EINVAL; break; } SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count); shadow_audit(d, 1); for_each_vcpu(d,v) __update_pagetables(v); return rc; } int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc) { unsigned int op = sc->op; int rc = 0; struct vcpu *v; if ( unlikely(d == current->domain) ) { DPRINTK("Don't try to do a shadow op on yourself!\n"); return -EINVAL; } domain_pause(d); shadow_lock(d); switch ( op ) { case DOM0_SHADOW_CONTROL_OP_OFF: __shadow_sync_all(d); __shadow_mode_disable(d); break; case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST: free_shadow_pages(d); rc = __shadow_mode_enable(d, SHM_enable); break; case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY: free_shadow_pages(d); rc = __shadow_mode_enable( d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty); break; case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE: free_shadow_pages(d); rc = __shadow_mode_enable( d, d->arch.shadow_mode|SHM_enable|SHM_refcounts|SHM_translate); break; default: rc = shadow_mode_enabled(d) ? 
shadow_mode_table_op(d, sc) : -EINVAL; break; } shadow_unlock(d); for_each_vcpu(d,v) update_pagetables(v); domain_unpause(d); return rc; } void shadow_mode_init(void) { } int _shadow_mode_refcounts(struct domain *d) { return shadow_mode_refcounts(d); } int set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn, struct domain_mmap_cache *l2cache, struct domain_mmap_cache *l1cache) { unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table); l2_pgentry_t *l2, l2e; l1_pgentry_t *l1; struct pfn_info *l1page; unsigned long va = pfn << PAGE_SHIFT; ASSERT(tabpfn != 0); l2 = map_domain_page_with_cache(tabpfn, l2cache); l2e = l2[l2_table_offset(va)]; if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) { l1page = alloc_domheap_page(NULL); if ( !l1page ) { unmap_domain_page_with_cache(l2, l2cache); return 0; } l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache); memset(l1, 0, PAGE_SIZE); unmap_domain_page_with_cache(l1, l1cache); l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR); l2[l2_table_offset(va)] = l2e; } unmap_domain_page_with_cache(l2, l2cache); l1 = map_domain_page_with_cache(l2e_get_pfn(l2e), l1cache); l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR); unmap_domain_page_with_cache(l1, l1cache); return 1; } int alloc_p2m_table(struct domain *d) { struct list_head *list_ent; struct pfn_info *page, *l2page; l2_pgentry_t *l2; unsigned long mfn, pfn; struct domain_mmap_cache l1cache, l2cache; l2page = alloc_domheap_page(NULL); if ( l2page == NULL ) return 0; domain_mmap_cache_init(&l1cache); domain_mmap_cache_init(&l2cache); d->arch.phys_table = mk_pagetable(page_to_phys(l2page)); l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache); memset(l2, 0, PAGE_SIZE); unmap_domain_page_with_cache(l2, &l2cache); list_ent = d->page_list.next; while ( list_ent != &d->page_list ) { page = list_entry(list_ent, struct pfn_info, list); mfn = page_to_pfn(page); pfn = get_pfn_from_mfn(mfn); ASSERT(pfn != INVALID_M2P_ENTRY); ASSERT(pfn < (1u<<20)); set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache); list_ent = page->list.next; } list_ent = d->xenpage_list.next; while ( list_ent != &d->xenpage_list ) { page = list_entry(list_ent, struct pfn_info, list); mfn = page_to_pfn(page); pfn = get_pfn_from_mfn(mfn); if ( (pfn != INVALID_M2P_ENTRY) && (pfn < (1u<<20)) ) { set_p2m_entry(d, pfn, mfn, &l2cache, &l1cache); } list_ent = page->list.next; } domain_mmap_cache_destroy(&l2cache); domain_mmap_cache_destroy(&l1cache); return 1; } void shadow_l1_normal_pt_update( struct domain *d, unsigned long pa, l1_pgentry_t gpte, struct domain_mmap_cache *cache) { unsigned long sl1mfn; l1_pgentry_t *spl1e, spte; shadow_lock(d); sl1mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT, PGT_l1_shadow); if ( sl1mfn ) { SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%" PRIpte, (void *)pa, l1e_get_intpte(gpte)); l1pte_propagate_from_guest(current->domain, gpte, &spte); spl1e = map_domain_page_with_cache(sl1mfn, cache); spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte; unmap_domain_page_with_cache(spl1e, cache); } shadow_unlock(d); } void shadow_l2_normal_pt_update( struct domain *d, unsigned long pa, l2_pgentry_t gpde, struct domain_mmap_cache *cache) { unsigned long sl2mfn; l2_pgentry_t *spl2e; shadow_lock(d); sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT, PGT_l2_shadow); if ( sl2mfn ) { SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%" PRIpte, (void *)pa, l2e_get_intpte(gpde)); spl2e = map_domain_page_with_cache(sl2mfn, cache); validate_pde_change(d, gpde, &spl2e[(pa & 
~PAGE_MASK) / sizeof(l2_pgentry_t)]); unmap_domain_page_with_cache(spl2e, cache); } shadow_unlock(d); } #if CONFIG_PAGING_LEVELS >= 3 void shadow_l3_normal_pt_update( struct domain *d, unsigned long pa, l3_pgentry_t gpde, struct domain_mmap_cache *cache) { unsigned long sl3mfn; pgentry_64_t *spl3e; shadow_lock(d); sl3mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT, PGT_l3_shadow); if ( sl3mfn ) { SH_VVLOG("shadow_l3_normal_pt_update pa=%p, gpde=%" PRIpte, (void *)pa, l3e_get_intpte(gpde)); spl3e = (pgentry_64_t *) map_domain_page_with_cache(sl3mfn, cache); validate_entry_change(d, (pgentry_64_t *) &gpde, &spl3e[(pa & ~PAGE_MASK) / sizeof(l3_pgentry_t)], shadow_type_to_level(PGT_l3_shadow)); unmap_domain_page_with_cache(spl3e, cache); } shadow_unlock(d); } #endif #if CONFIG_PAGING_LEVELS >= 4 void shadow_l4_normal_pt_update( struct domain *d, unsigned long pa, l4_pgentry_t gpde, struct domain_mmap_cache *cache) { unsigned long sl4mfn; pgentry_64_t *spl4e; shadow_lock(d); sl4mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT, PGT_l4_shadow); if ( sl4mfn ) { SH_VVLOG("shadow_l4_normal_pt_update pa=%p, gpde=%" PRIpte, (void *)pa, l4e_get_intpte(gpde)); spl4e = (pgentry_64_t *)map_domain_page_with_cache(sl4mfn, cache); validate_entry_change(d, (pgentry_64_t *)&gpde, &spl4e[(pa & ~PAGE_MASK) / sizeof(l4_pgentry_t)], shadow_type_to_level(PGT_l4_shadow)); unmap_domain_page_with_cache(spl4e, cache); } shadow_unlock(d); } #endif static void translate_l1pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l1mfn) { int i; l1_pgentry_t *l1; l1 = map_domain_page(l1mfn); for (i = 0; i < L1_PAGETABLE_ENTRIES; i++) { if ( is_guest_l1_slot(i) && (l1e_get_flags(l1[i]) & _PAGE_PRESENT) ) { unsigned long mfn = l1e_get_pfn(l1[i]); unsigned long gpfn = __mfn_to_gpfn(d, mfn); ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn); l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i])); } } unmap_domain_page(l1); } // This is not general enough to handle arbitrary pagetables // with shared L1 pages, etc., but it is sufficient for bringing // up dom0. 
// void translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn, unsigned int type) { int i; l2_pgentry_t *l2; ASSERT(shadow_mode_translate(d) && !shadow_mode_external(d)); l2 = map_domain_page(l2mfn); for (i = 0; i < L2_PAGETABLE_ENTRIES; i++) { if ( is_guest_l2_slot(type, i) && (l2e_get_flags(l2[i]) & _PAGE_PRESENT) ) { unsigned long mfn = l2e_get_pfn(l2[i]); unsigned long gpfn = __mfn_to_gpfn(d, mfn); ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn); l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i])); translate_l1pgtable(d, p2m, mfn); } } unmap_domain_page(l2); } void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype) { unsigned long smfn; //printk("%s(gpfn=%lx, type=%x)\n", __func__, gpfn, stype); shadow_lock(d); while ( stype >= PGT_l1_shadow ) { smfn = __shadow_status(d, gpfn, stype); if ( smfn && MFN_PINNED(smfn) ) shadow_unpin(smfn); stype -= PGT_l1_shadow; } shadow_unlock(d); } unsigned long gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn) { ASSERT( shadow_mode_translate(d) ); perfc_incrc(gpfn_to_mfn_foreign); unsigned long va = gpfn << PAGE_SHIFT; unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table); l2_pgentry_t *l2 = map_domain_page(tabpfn); l2_pgentry_t l2e = l2[l2_table_offset(va)]; unmap_domain_page(l2); if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) { printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%" PRIpte "\n", d->domain_id, gpfn, l2e_get_intpte(l2e)); return INVALID_MFN; } l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e)); l1_pgentry_t l1e = l1[l1_table_offset(va)]; unmap_domain_page(l1); #if 0 printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx tabpfn=%lx l2e=%lx l1tab=%lx, l1e=%lx\n", d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, tabpfn, l2e, l1tab, l1e); #endif if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) { printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l1e=%" PRIpte "\n", d->domain_id, gpfn, l1e_get_intpte(l1e)); return INVALID_MFN; } return l1e_get_pfn(l1e); } static u32 remove_all_access_in_page( struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn) { l1_pgentry_t *pl1e = map_domain_page(l1mfn); l1_pgentry_t match, ol2e; unsigned long flags = _PAGE_PRESENT; int i; u32 count = 0; int is_l1_shadow = ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) == PGT_l1_shadow); match = l1e_from_pfn(forbidden_gmfn, flags); for (i = 0; i < L1_PAGETABLE_ENTRIES; i++) { if ( l1e_has_changed(pl1e[i], match, flags) ) continue; ol2e = pl1e[i]; pl1e[i] = l1e_empty(); count++; if ( is_l1_shadow ) shadow_put_page_from_l1e(ol2e, d); else /* must be an hl2 page */ put_page(&frame_table[forbidden_gmfn]); } unmap_domain_page(pl1e); return count; } static u32 __shadow_remove_all_access(struct domain *d, unsigned long forbidden_gmfn) { int i; struct shadow_status *a; u32 count = 0; if ( unlikely(!shadow_mode_enabled(d)) ) return 0; ASSERT(shadow_lock_is_acquired(d)); perfc_incrc(remove_all_access); for (i = 0; i < shadow_ht_buckets; i++) { a = &d->arch.shadow_ht[i]; while ( a && a->gpfn_and_flags ) { switch (a->gpfn_and_flags & PGT_type_mask) { case PGT_l1_shadow: case PGT_l2_shadow: case PGT_l3_shadow: case PGT_l4_shadow: case PGT_hl2_shadow: count += remove_all_access_in_page(d, a->smfn, forbidden_gmfn); break; case PGT_snapshot: case PGT_writable_pred: // these can't hold refs to the forbidden page break; default: BUG(); } a = a->next; } } return count; } void shadow_drop_references( struct domain *d, struct pfn_info *page) { if ( likely(!shadow_mode_refcounts(d)) || ((page->u.inuse.type_info & 
PGT_count_mask) == 0) ) return; /* XXX This needs more thought... */ printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n", __func__, page_to_pfn(page)); printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page), page->count_info, page->u.inuse.type_info); shadow_lock(d); __shadow_remove_all_access(d, page_to_pfn(page)); shadow_unlock(d); printk("After: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page), page->count_info, page->u.inuse.type_info); } /* XXX Needs more thought. Neither pretty nor fast: a place holder. */ void shadow_sync_and_drop_references( struct domain *d, struct pfn_info *page) { if ( likely(!shadow_mode_refcounts(d)) ) return; shadow_lock(d); if ( page_out_of_sync(page) ) __shadow_sync_mfn(d, page_to_pfn(page)); __shadow_remove_all_access(d, page_to_pfn(page)); shadow_unlock(d); } /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */