path: root/testhal/STM32/STM32F37x/CAN/halconf.h
/*
    ChibiOS - Copyright (C) 2006..2018 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/**
 * @file    templates/halconf.h
 * @brief   HAL configuration header.
 * @details HAL configuration file; it allows you to enable or disable the
 *          various device drivers from your application. You may also use
 *          this file to override the device drivers' default settings.
 *
 * @addtogroup HAL_CONF
 * @{
 */

#ifndef HALCONF_H
#define HALCONF_H

#define _CHIBIOS_HAL_CONF_
#define _CHIBIOS_HAL_CONF_VER_6_0_

#include "mcuconf.h"

/**
 * @brief   Enables the PAL subsystem.
 */
#if !defined(HAL_USE_PAL) || defined(__DOXYGEN__)
#define HAL_USE_PAL                         TRUE
#endif

/**
 * @brief   Enables the ADC subsystem.
 */
#if !defined(HAL_USE_ADC) || defined(__DOXYGEN__)
#define HAL_USE_ADC                         FALSE
#endif

/**
 * @brief   Enables the CAN subsystem.
 */
#if !defined(HAL_USE_CAN) || defined(__DOXYGEN__)
#define HAL_USE_CAN                         TRUE
#endif

/**
 * @brief   Enables the cryptographic subsystem.
 */
#if !defined(HAL_USE_CRY) || defined(__DOXYGEN__)
#define HAL_USE_CRY                         FALSE
#endif

/**
 * @brief   Enables the DAC subsystem.
 */
#if !defined(HAL_USE_DAC) || defined(__DOXYGEN__)
#define HAL_USE_DAC                         FALSE
#endif

/**
 * @brief   Enables the EXT subsystem.
 */
#if !defined(HAL_USE_EXT) || defined(__DOXYGEN__)
#define HAL_USE_EXT                         FALSE
#endif

/**
 * @brief   Enables the GPT subsystem.
 */
#if !defined(HAL_USE_GPT) || defined(__DOXYGEN__)
#define HAL_USE_GPT                         FALSE
#endif

/**
 * @brief   Enables the I2C subsystem.
 */
#if !defined(HAL_USE_I2C) || defined(__DOXYGEN__)
#define HAL_USE_I2C                         FALSE
#endif

/**
 * @brief   Enables the I2S subsystem.
 */
#if !defined(HAL_USE_I2S) || defined(__DOXYGEN__)
#define HAL_USE_I2S                         FALSE
#endif

/**
 * @brief   Enables the ICU subsystem.
 */
#if !defined(HAL_USE_ICU) || defined(__DOXYGEN__)
#define HAL_USE_ICU                         FALSE
#endif

/**
 * @brief   Enables the MAC subsystem.
 */
#if !defined(HAL_USE_MAC) || defined(__DOXYGEN__)
#define HAL_USE_MAC                         FALSE
#endif

/**
 * @brief   Enables the MMC_SPI subsystem.
 */
#if !defined(HAL_USE_MMC_SPI) || defined(__DOXYGEN__)
#define HAL_USE_MMC_SPI                     FALSE
#endif

/**
 * @brief   Enables the PWM subsystem.
 */
#if !defined(HAL_USE_PWM) || defined(__DOXYGEN__)
#define HAL_USE_PWM                         FALSE
#endif

/**
 * @brief   Enables the QSPI subsystem.
 */
#if !defined(HAL_USE_QSPI) || defined(__DOXYGEN__)
#define HAL_USE_QSPI                        FALSE
#endif

/**
 * @brief   Enables the RTC subsystem.
 */
#if !defined(HAL_USE_RTC) || defined(__DOXYGEN__)
#define HAL_USE_RTC                         FALSE
#endif

/**
 * @brief   Enables the SDC subsystem.
 */
#if !defined(HAL_USE_SDC) || defined(__DOXYGEN__)
#define HAL_USE_SDC                         FALSE
#endif

/**
 * @brief   Enables the SERIAL subsystem.
 */
#if !defined(HAL_USE_SERIAL) || defined(__DOXYGEN__)
#define HAL_USE_SERIAL                      FALSE
#endif

/**
 * @brief   Enables the SERIAL over USB subsystem.
 */
#if !defined(HAL_USE_SERIAL_USB) || defined(__DOXYGEN__)
#define HAL_USE_SERIAL_USB                  FALSE
#endif

/**
 * @brief   Enables the SPI subsystem.
 */
#if !defined(HAL_USE_SPI) || defined(__DOXYGEN__)
#define HAL_USE_SPI                         FALSE
#endif

/**
 * @brief   Enables the UART subsystem.
 */
#if !defined(HAL_USE_UART) || defined(__DOXYGEN__)
#define HAL_USE_UART                        FALSE
#endif

/**
 * @brief   Enables the USB subsystem.
 */
#if !defined(HAL_USE_USB) || defined(__DOXYGEN__)
#define HAL_USE_USB                         FALSE
#endif

/**
 * @brief   Enables the WDG subsystem.
 */
#if !defined(HAL_USE_WDG) || defined(__DOXYGEN__)
#define HAL_USE_WDG                         FALSE
#endif

/*===========================================================================*/
/* PAL driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables callbacks on PAL events.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(PAL_USE_CALLBACKS) || defined(__DOXYGEN__)
#define PAL_USE_CALLBACKS                   FALSE
#endif

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(PAL_USE_WAIT) || defined(__DOXYGEN__)
#define PAL_USE_WAIT                        FALSE
#endif

/*===========================================================================*/
/* ADC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(ADC_USE_WAIT) || defined(__DOXYGEN__)
#define ADC_USE_WAIT                        TRUE
#endif

/**
 * @brief   Enables the @p adcAcquireBus() and @p adcReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(ADC_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define ADC_USE_MUTUAL_EXCLUSION            TRUE
#endif

/*===========================================================================*/
/* CAN driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Sleep mode related APIs inclusion switch.
 */
#if !defined(CAN_USE_SLEEP_MODE) || defined(__DOXYGEN__)
#define CAN_USE_SLEEP_MODE                  TRUE
#endif

/**
 * @brief   Forces the driver to use direct callbacks rather than OSAL events.
 */
#if !defined(CAN_ENFORCE_USE_CALLBACKS) || defined(__DOXYGEN__)
#define CAN_ENFORCE_USE_CALLBACKS           FALSE
#endif
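
/*
 * Illustrative usage sketch (not part of this configuration file): with
 * HAL_USE_CAN and CAN_USE_SLEEP_MODE enabled above, an application would
 * typically bring the driver up roughly as below. The bit-timing values and
 * frame contents are assumptions taken from typical ChibiOS STM32 bxCAN
 * demos, not something these settings mandate.
 *
 *   static const CANConfig cancfg = {
 *     CAN_MCR_ABOM | CAN_MCR_AWUM | CAN_MCR_TXFP,   // bus-off recovery, wakeup
 *     CAN_BTR_SJW(0) | CAN_BTR_TS2(1) | CAN_BTR_TS1(8) | CAN_BTR_BRP(6)
 *   };
 *
 *   canStart(&CAND1, &cancfg);           // start CAN1 with the config above
 *
 *   CANTxFrame txmsg;
 *   txmsg.IDE = CAN_IDE_STD;             // standard 11-bit identifier
 *   txmsg.SID = 0x123;
 *   txmsg.RTR = CAN_RTR_DATA;
 *   txmsg.DLC = 8;
 *   txmsg.data32[0] = 0x55AA55AA;
 *   txmsg.data32[1] = 0x00FF00FF;
 *   canTransmit(&CAND1, CAN_ANY_MAILBOX, &txmsg, TIME_MS2I(100));
 */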

/*===========================================================================*/
/* CRY driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables the SW fall-back of the cryptographic driver.
 * @details When enabled, this option activates a fall-back software
 *          implementation for algorithms not supported by the underlying
 *          hardware.
 * @note    Fall-back implementations may not be present for all algorithms.
 */
#if !defined(HAL_CRY_USE_FALLBACK) || defined(__DOXYGEN__)
#define HAL_CRY_USE_FALLBACK                FALSE
#endif

/**
 * @brief   Makes the driver forcibly use the fall-back implementations.
 */
#if !defined(HAL_CRY_ENFORCE_FALLBACK) || defined(__DOXYGEN__)
#define HAL_CRY_ENFORCE_FALLBACK            FALSE
#endif

/*===========================================================================*/
/* DAC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(DAC_USE_WAIT) || defined(__DOXYGEN__)
#define DAC_USE_WAIT                        TRUE
#endif

/**
 * @brief   Enables the @p dacAcquireBus() and @p dacReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(DAC_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define DAC_USE_MUTUAL_EXCLUSION            TRUE
#endif

/*===========================================================================*/
/* I2C driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables the mutual exclusion APIs on the I2C bus.
 */
#if !defined(I2C_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define I2C_USE_MUTUAL_EXCLUSION            TRUE
#endif

/*===========================================================================*/
/* MAC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables the zero-copy API.
 */
#if !defined(MAC_USE_ZERO_COPY) || defined(__DOXYGEN__)
#define MAC_USE_ZERO_COPY                   FALSE
#endif

/**
 * @brief   Enables an event source for incoming packets.
 */
#if !defined(MAC_USE_EVENTS) || defined(__DOXYGEN__)
#define MAC_USE_EVENTS                      TRUE
#endif

/*===========================================================================*/
/* MMC_SPI driver related settings.                                          */
/*===========================================================================*/

/**
 * @brief   Delays insertions.
 * @details If enabled, this option inserts delays into the MMC waiting
 *          routines, releasing some extra CPU time for lower-priority
 *          threads; this may slow down the driver a bit, however.
 *          This option is also recommended if the SPI driver does not
 *          use a DMA channel and heavily loads the CPU.
 */
#if !defined(MMC_NICE_WAITING) || defined(__DOXYGEN__)
#define MMC_NICE_WAITING                    TRUE
#endif

/*===========================================================================*/
/* QSPI driver related settings.                                             */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(QSPI_USE_WAIT) || defined(__DOXYGEN__)
#define QSPI_USE_WAIT                       TRUE
#endif

/**
 * @brief   Enables the @p qspiAcquireBus() and @p qspiReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(QSPI_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define QSPI_USE_MUTUAL_EXCLUSION           TRUE
#endif

/*===========================================================================*/
/* SDC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Number of initialization attempts before rejecting the card.
 * @note    Attempts are performed at 10 ms intervals.
 */
#if !defined(SDC_INIT_RETRY) || defined(__DOXYGEN__)
#define SDC_INIT_RETRY                      100
#endif

/**
 * @brief   Include support for MMC cards.
 * @note    MMC support is not yet implemented so this option must be kept
 *          at @p FALSE.
 */
#if !defined(SDC_MMC_SUPPORT) || defined(__DOXYGEN__)
#define SDC_MMC_SUPPORT                     FALSE
#endif

/**
 * @brief   Delays insertions.
 * @details If enabled, this option inserts delays into the MMC waiting
 *          routines, releasing some extra CPU time for lower-priority
 *          threads; this may slow down the driver a bit, however.
 */
#if !defined(SDC_NICE_WAITING) || defined(__DOXYGEN__)
#define SDC_NICE_WAITING                    TRUE
#endif

/**
 * @brief   OCR initialization constant for V20 cards.
 */
#if !defined(SDC_INIT_OCR_V20) || defined(__DOXYGEN__)
#define SDC_INIT_OCR_V20                    0x50FF8000U
#endif

/**
 * @brief   OCR initialization constant for non-V20 cards.
 */
#if !defined(SDC_INIT_OCR) || defined(__DOXYGEN__)
#define SDC_INIT_OCR                        0x80100000U
#endif

/*===========================================================================*/
/* SERIAL driver related settings.                                           */
/*===========================================================================*/

/**
 * @brief   Default bit rate.
 * @details Configuration parameter: the baud rate selected for the
 *          default configuration.
 */
#if !defined(SERIAL_DEFAULT_BITRATE) || defined(__DOXYGEN__)
#define SERIAL_DEFAULT_BITRATE              38400
#endif

/**
 * @brief   Serial buffers size.
 * @details Configuration parameter: the depth of the queue buffers can be
 *          changed depending on the requirements of your application.
 * @note    The default is 16 bytes for both the transmission and receive
 *          buffers.
 */
#if !defined(SERIAL_BUFFERS_SIZE) || defined(__DOXYGEN__)
#define SERIAL_BUFFERS_SIZE                 16
#endif

/*===========================================================================*/
/* SERIAL_USB driver related settings.                                       */
/*===========================================================================*/

/**
 * @brief   Serial over USB buffers size.
 * @details Configuration parameter: the buffer size must be a multiple of
 *          the USB data endpoint's maximum packet size.
 * @note    The default is 256 bytes for both the transmission and receive
 *          buffers.
 */
#if !defined(SERIAL_USB_BUFFERS_SIZE) || defined(__DOXYGEN__)
#define SERIAL_USB_BUFFERS_SIZE             256
#endif

/**
 * @brief   Serial over USB number of buffers.
 * @note    The default is 2 buffers.
 */
#if !defined(SERIAL_USB_BUFFERS_NUMBER) || defined(__DOXYGEN__)
#define SERIAL_USB_BUFFERS_NUMBER           2
#endif

/*===========================================================================*/
/* SPI driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(SPI_USE_WAIT) || defined(__DOXYGEN__)
#define SPI_USE_WAIT                        TRUE
#endif

/**
 * @brief   Enables circular transfers APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(SPI_USE_CIRCULAR) || defined(__DOXYGEN__)
#define SPI_USE_CIRCULAR                    FALSE
#endif

/**
 * @brief   Enables the @p spiAcquireBus() and @p spiReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(SPI_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define SPI_USE_MUTUAL_EXCLUSION            TRUE
#endif

/**
 * @brief   Handling method for SPI CS line.
 * @note    The default is @p SPI_SELECT_MODE_PAD.
 */
#if !defined(SPI_SELECT_MODE) || defined(__DOXYGEN__)
#define SPI_SELECT_MODE                     SPI_SELECT_MODE_PAD
#endif

/*===========================================================================*/
/* UART driver related settings.                                             */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(UART_USE_WAIT) || defined(__DOXYGEN__)
#define UART_USE_WAIT                       FALSE
#endif

/**
 * @brief   Enables the @p uartAcquireBus() and @p uartReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(UART_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define UART_USE_MUTUAL_EXCLUSION           FALSE
#endif

/*===========================================================================*/
/* USB driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(USB_USE_WAIT) || defined(__DOXYGEN__)
#define USB_USE_WAIT                        FALSE
#endif

#endif /* HALCONF_H */

/** @} */
/******************************************************************************
 * Arch-specific domctl.c
 * 
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <xen/pci.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <asm/acpi.h> /* for hvm_acpi_power_button */
#include <asm/hypercall.h> /* for arch_do_domctl */
#include <xsm/xsm.h>
#include <xen/iommu.h>
#include <asm/mem_event.h>
#include <public/mem_event.h>
#include <asm/mem_sharing.h>
#include <asm/i387.h>

#ifdef XEN_KDB_CONFIG
#include "../kdb/include/kdbdefs.h"
#include "../kdb/include/kdbproto.h"
#else
typedef unsigned long kdbva_t;
typedef unsigned char kdbbyt_t;
extern int dbg_rw_mem(kdbva_t, kdbbyt_t *, int, domid_t, int, uint64_t);
#endif

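/*
 * Read or write guest memory on behalf of the gdbsx debugger.  dbg_rw_mem()
 * returns the number of bytes left untransferred; any non-zero remainder is
 * reported to the caller as -EFAULT.
 */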
static int gdbsx_guest_mem_io(
    domid_t domid, struct xen_domctl_gdbsx_memio *iop)
{   
    ulong l_uva = (ulong)iop->uva;
    iop->remain = dbg_rw_mem(
        (kdbva_t)iop->gva, (kdbbyt_t *)l_uva, iop->len, domid,
        iop->gwr, iop->pgd3val);
    return (iop->remain ? -EFAULT : 0);
}

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        } 
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }
            
            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo3:
#ifdef __x86_64__
        if ( !has_32bit_shinfo(current->domain) )
        {
            unsigned int n, j;
            unsigned int num = domctl->u.getpageframeinfo3.num;
            domid_t dom = domctl->domain;
            struct domain *d;
            struct page_info *page;
            xen_pfn_t *arr;

            ret = -ESRCH;
            if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
                break;

            if ( unlikely(num > 1024) ||
                 unlikely(num != domctl->u.getpageframeinfo3.num) )
            {
                ret = -E2BIG;
                rcu_unlock_domain(d);
                break;
            }

            page = alloc_domheap_page(NULL, 0);
            if ( !page )
            {
                ret = -ENOMEM;
                rcu_unlock_domain(d);
                break;
            }
            arr = page_to_virt(page);

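            /*
             * Walk the guest-supplied GMFN array in batches of
             * PAGE_SIZE / sizeof(*arr) entries, replacing each entry with
             * its page table type information.
             */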
            for ( n = ret = 0; n < num; )
            {
                unsigned int k = min_t(unsigned int, num - n,
                                       PAGE_SIZE / sizeof(*arr));

                if ( copy_from_guest_offset(arr,
                                            domctl->u.getpageframeinfo3.array,
                                            n, k) )
                {
                    ret = -EFAULT;
                    break;
                }

                for ( j = 0; j < k; j++ )
                {
                    unsigned long type = 0, mfn = gmfn_to_mfn(d, arr[j]);

                    page = mfn_to_page(mfn);

                    if ( unlikely(!mfn_valid(mfn)) ||
                         unlikely(is_xen_heap_mfn(mfn)) )
                        type = XEN_DOMCTL_PFINFO_XTAB;
                    else if ( xsm_getpageframeinfo(page) != 0 )
                        ;
                    else if ( likely(get_page(page, d)) )
                    {
                        switch( page->u.inuse.type_info & PGT_type_mask )
                        {
                        case PGT_l1_page_table:
                            type = XEN_DOMCTL_PFINFO_L1TAB;
                            break;
                        case PGT_l2_page_table:
                            type = XEN_DOMCTL_PFINFO_L2TAB;
                            break;
                        case PGT_l3_page_table:
                            type = XEN_DOMCTL_PFINFO_L3TAB;
                            break;
                        case PGT_l4_page_table:
                            type = XEN_DOMCTL_PFINFO_L4TAB;
                            break;
                        }

                        if ( page->u.inuse.type_info & PGT_pinned )
                            type |= XEN_DOMCTL_PFINFO_LPINTAB;

                        put_page(page);
                    }
                    else
                        type = XEN_DOMCTL_PFINFO_XTAB;

                    arr[j] = type;
                }

                if ( copy_to_guest_offset(domctl->u.getpageframeinfo3.array,
                                          n, arr, k) )
                {
                    ret = -EFAULT;
                    break;
                }

                n += k;
            }

            free_domheap_page(virt_to_page(arr));

            rcu_unlock_domain(d);
            break;
        }
#endif
        /* fall thru */
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n, j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            rcu_unlock_domain(d);
            break;
        }
 
        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }
     
            for ( j = 0; j < k; j++ )
            {      
                struct page_info *page;
                unsigned long mfn = gmfn_to_mfn(d, arr32[j]);

                page = mfn_to_page(mfn);

                if ( domctl->cmd == XEN_DOMCTL_getpageframeinfo3 )
                    arr32[j] = 0;

                if ( unlikely(!mfn_valid(mfn)) ||
                     unlikely(is_xen_heap_mfn(mfn)) )
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
                else if ( xsm_getpageframeinfo(page) != 0 )
                    continue;
                else if ( likely(get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getmemlist:
    {
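        /*
         * Copy up to max_pfns MFNs from the domain's page list into the
         * caller-supplied buffer and report how many entries were written
         * back in num_pfns.
         */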
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct page_info *page;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) )
            {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = i = 0;
            page_list_for_each(page, &d->page_list)
            {
                if ( i >= max_pfns )
                    break;
                mfn = page_to_mfn(page);
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                ++i;
            }
            
            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_hypercall_init:
    {
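        /*
         * Initialise the guest's hypercall transfer page: translate the
         * supplied GMFN, map the frame and write the hypercall stubs into it
         * via hypercall_page_initialise().
         */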
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sethvmcontext:
    { 
        struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) ) 
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    { 
        struct hvm_domain_context c = { 0 };
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) ) 
            goto gethvmcontext_out;

        c.size = hvm_save_size(d);

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;            
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size ) 
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext_partial:
    { 
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_partial_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) ) 
            goto gethvmcontext_partial_out;

        domain_pause(d);
        ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
                           domctl->u.hvmcontext_partial.instance,
                           domctl->u.hvmcontext_partial.buffer);
        domain_unpause(d);

    gethvmcontext_partial_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size =
            is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_set_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
            goto set_machine_address_size_out;

        ret = -EBUSY;
        if ( d->tot_pages > 0 )
            goto set_machine_address_size_out;

        d->arch.physaddr_bitsize = domctl->u.address_size.size;

        ret = 0;
    set_machine_address_size_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = d->arch.physaddr_bitsize;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
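        /*
         * Deliver an asynchronous event to the target vCPU/domain: an NMI,
         * or (for HVM guests) a virtual ACPI power or sleep button press.
         */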
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_sendtrigger(d);
        if ( ret )
            goto sendtrigger_out;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
             (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        case XEN_DOMCTL_SENDTRIGGER_POWER:
        {
            ret = -EINVAL;
            if ( is_hvm_domain(d) ) 
            {
                ret = 0;
                hvm_acpi_power_button(d);
            }
        }
        break;

        case XEN_DOMCTL_SENDTRIGGER_SLEEP:
        {
            extern void hvm_acpi_sleep_button(struct domain *d);

            ret = -EINVAL;
            if ( is_hvm_domain(d) ) 
            {
                ret = 0;
                hvm_acpi_sleep_button(d);
            }
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_device_group:
    {
        struct domain *d;
        u32 max_sdevs;
        u8 bus, devfn;
        XEN_GUEST_HANDLE_64(uint32) sdevs;
        int num_sdevs;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
        max_sdevs = domctl->u.get_device_group.max_sdevs;
        sdevs = domctl->u.get_device_group.sdev_array;

        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
        if ( num_sdevs < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            domctl->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            ret = 0;
            domctl->u.get_device_group.num_sdevs = num_sdevs;
        }
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = xsm_test_assign_device(domctl->u.assign_device.machine_bdf);
        if ( ret )
            break;

        ret = -EINVAL;
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                     "%x:%x.%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto assign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        ret = assign_device(d, bus, devfn);
        if ( ret )
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "assign device (%x:%x.%x) failed\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    assign_device_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_deassign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto deassign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        spin_lock(&pcidevs_lock);
        ret = deassign_device(d, bus, devfn);
        spin_unlock(&pcidevs_lock);
        if ( ret )
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_deassign_device: "
                     "deassign device (%x:%x.%x) failed\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    deassign_device_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain *d;
        xen_domctl_bind_pt_irq_t *bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);

        ret = xsm_bind_pt_irq(d, bind);
        if ( ret )
            goto bind_out;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) &&
             !irq_access_permitted(current->domain, bind->machine_irq) )
            goto bind_out;

        ret = -ESRCH;
        if ( iommu_enabled )
        {
            spin_lock(&pcidevs_lock);
            ret = pt_irq_create_bind_vtd(d, bind);
            spin_unlock(&pcidevs_lock);
        }
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");

    bind_out:
        rcu_unlock_domain(d);
    }
    break;    

    case XEN_DOMCTL_unbind_pt_irq:
    {
        struct domain *d;
        xen_domctl_bind_pt_irq_t *bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) &&
             !irq_access_permitted(current->domain, bind->machine_irq) )
            goto unbind_out;

        if ( iommu_enabled )
        {
            spin_lock(&pcidevs_lock);
            ret = pt_irq_destroy_bind_vtd(d, bind);
            spin_unlock(&pcidevs_lock);
        }
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");

    unbind_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_memory_mapping:
    {
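        /*
         * Add or remove a direct MMIO mapping: grant or revoke iomem access
         * for the machine frame range and install or clear the corresponding
         * p2m entries for the guest frame range.
         */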
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) &&
             !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = 0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                 gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS    0x10000
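        /*
         * Map or unmap a range of guest I/O ports onto machine I/O ports:
         * maintain the domain's g2m_ioport list and adjust the I/O port
         * access permissions accordingly.
         */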
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
            ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                fgp, fmp, np);
            break;
        }

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) &&
             !ioports_access_permitted(current->domain, fmp, fmp + np - 1) )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                if ( !g2m_ioport )
                {
                    ret = -ENOMEM;
                    rcu_unlock_domain(d);
                    break;
                }
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                fgp, fmp, np);
            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_pin_mem_cacheattr(d);
        if ( ret )
            goto pin_out;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

    pin_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_ext_vcpucontext(d, domctl->cmd);
        if ( ret )
            goto ext_vcpucontext_out;

        ret = -ESRCH;
        if ( (evc->vcpu >= d->max_vcpus) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs      = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip     = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events  = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs     = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip    = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs      = 0;
            evc->sysenter_callback_eip     = 0;
            evc->sysenter_disables_events  = 0;
            evc->syscall32_callback_cs     = 0;
            evc->syscall32_callback_eip    = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs      = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip     = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events  = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs     = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip    = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_set_cpuid:
    {
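        /*
         * Install a CPUID policy leaf for the domain: reuse an existing
         * entry matching the same input(s) or take the first unused slot,
         * failing with -ENOENT if the table is full.
         */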
        struct domain *d;
        xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
        cpuid_input_t *cpuid = NULL; 
        int i;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
        {
            cpuid = &d->arch.cpuids[i];

            if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
                break;

            if ( (cpuid->input[0] == ctl->input[0]) &&
                 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
                  (cpuid->input[1] == ctl->input[1])) )
                break;
        }
        
        if ( i == MAX_CPUID_INPUT )
        {
            ret = -ENOENT;
        }
        else
        {
            memcpy(cpuid, ctl, sizeof(cpuid_input_t));
            ret = 0;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gettscinfo:
    {
        struct domain *d;
        xen_guest_tsc_info_t info;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        domain_pause(d);
        tsc_get_info(d, &info.tsc_mode,
                        &info.elapsed_nsec,
                        &info.gtsc_khz,
                        &info.incarnation);
        if ( copy_to_guest(domctl->u.tsc_info.out_info, &info, 1) )
            ret = -EFAULT;
        else
            ret = 0;
        domain_unpause(d);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settscinfo:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        domain_pause(d);
        tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
                     domctl->u.tsc_info.info.elapsed_nsec,
                     domctl->u.tsc_info.info.gtsc_khz,
                     domctl->u.tsc_info.info.incarnation);
        domain_unpause(d);

        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_suppress_spurious_page_faults:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            d->arch.suppress_spurious_page_faults = 1;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    case XEN_DOMCTL_debug_op:
    {
        struct domain *d;
        struct vcpu *v;