/******************************************************************************
 * xc_linux_save.c
 *
 * Save the state of a running Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

#include "xc_private.h"
#include "xg_private.h"
#include "xg_save_restore.h"

/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_linux_save().
**
** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
**
*/
#define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
#define DEF_MAX_FACTOR   3   /* never send more than 3x nr_pfns   */

/* max mfn of the whole machine */
static unsigned long max_mfn;

/* virtual starting address of the hypervisor */
static unsigned long hvirt_start;

/* #levels of page tables used by the current guest */
static unsigned int pt_levels;

/* total number of pages used by the current guest */
static unsigned long max_pfn;

/* Live mapping of the table mapping each PFN to its current MFN. */
static xen_pfn_t *live_p2m = NULL;

/* Live mapping of system MFN to PFN table. */
static xen_pfn_t *live_m2p = NULL;

/* grep fodder: machine_to_phys */
#define mfn_to_pfn(_mfn) live_m2p[(_mfn)]

/*
 * Returns TRUE if the given machine frame number has a unique mapping
 * in the guest's pseudophysical map.
 */
#define MFN_IS_IN_PSEUDOPHYS_MAP(_mfn)          \
    (((_mfn) < (max_mfn)) &&                    \
     ((mfn_to_pfn(_mfn) < (max_pfn)) &&         \
      (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))

/* Returns TRUE if MFN is successfully converted to a PFN. */
#define translate_mfn_to_pfn(_pmfn)         \
({                                          \
    unsigned long mfn = *(_pmfn);           \
    int _res = 1;                           \
    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )   \
        _res = 0;                           \
    else                                    \
        *(_pmfn) = mfn_to_pfn(mfn);         \
    _res;                                   \
})

/*
** During (live) save/migrate, we maintain a number of bitmaps to track
** which pages we have to send, to fixup, and to skip.
*/

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

#define BITMAP_SIZE   ((max_pfn + BITS_PER_LONG - 1) / 8)

#define BITMAP_ENTRY(_nr,_bmap) \
   ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]

#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)

static inline int test_bit (int nr, volatile void * addr)
{
    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
}

static inline void clear_bit (int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
}

static inline void set_bit ( int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
}

/* Returns the hamming weight (i.e. the number of bits set) in an N-bit word */
static inline unsigned int hweight32(unsigned int w)
{
    unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
    res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
    res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
    return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
}

static inline int count_bits ( int nr, volatile void *addr)
{
    int i, count = 0;
    unsigned long *p = (unsigned long *)addr;
    /* We know that the array is padded to unsigned long. */
    for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
        count += hweight32(*p);
    return count;
}

static inline int permute( int i, int nr, int order_nr )
{
    /* Need a simple permutation function so that we scan pages in a
       pseudo random order, enabling us to get a better estimate of
       the domain's page dirtying rate as we go (there are often
       contiguous ranges of pfns that have similar behaviour, and we
       want to mix them up). */

    /* e.g. nr->order 15->4 16->4 17->5 */
    /* 512MB domain, 128k pages, order 17 */

    /* QPONMLKJIHGFEDCBA  QPONMLKJIH  GFEDCBA */
    /* QPONMLKJIHGFEDCBA  EDCBA  QPONM  LKJIHGF */

    do { i = ((i >> (order_nr - 10)) | (i << 10)) & ((1 << order_nr) - 1); }
    while ( i >= nr ); /* this won't ever loop if nr is a power of 2 */

    return i;
}

static uint64_t tv_to_us(struct timeval *new)
{
    return (new->tv_sec * 1000000) + new->tv_usec;
}

static uint64_t llgettimeofday(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return tv_to_us(&now);
}

static uint64_t tv_delta(struct timeval *new, struct timeval *old)
{
    return ((new->tv_sec - old->tv_sec)*1000000) +
        (new->tv_usec - old->tv_usec);
}

#ifdef ADAPTIVE_SAVE

/*
** We control the rate at which we transmit (or save) to minimize impact
** on running domains (including the target if we're doing live migrate).
*/

#define MAX_MBIT_RATE    500    /* maximum transmit rate for migrate */
#define START_MBIT_RATE  100    /* initial transmit rate for migrate */

/* Scaling factor to convert between a rate (in Mb/s) and time (in usecs) */
#define RATE_TO_BTU      781250

/* Amount in bytes we allow ourselves to send in a burst */
#define BURST_BUDGET (100*1024)

/* We keep track of the current and previous transmission rate */
static int mbit_rate, ombit_rate = 0;

/* Have we reached the maximum transmission rate? */
#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)

static inline void initialize_mbit_rate()
{
    mbit_rate = START_MBIT_RATE;
}

static int ratewrite(int io_fd, void *buf, int n)
{
    static int budget = 0;
    static int burst_time_us = -1;
    static struct timeval last_put = { 0 };
    struct timeval now;
    struct timespec delay;
    long long delta;

    if (START_MBIT_RATE == 0)
        return write(io_fd, buf, n);

    budget -= n;
    if (budget < 0) {
        if (mbit_rate != ombit_rate) {
            burst_time_us = RATE_TO_BTU / mbit_rate;
            ombit_rate = mbit_rate;
            DPRINTF("rate limit: %d mbit/s burst budget %d slot time %d\n",
                    mbit_rate, BURST_BUDGET, burst_time_us);
        }
        if (last_put.tv_sec == 0) {
            budget += BURST_BUDGET;
            gettimeofday(&last_put, NULL);
        } else {
            while (budget < 0) {
                gettimeofday(&now, NULL);
                delta = tv_delta(&now, &last_put);
                while (delta > burst_time_us) {
                    budget += BURST_BUDGET;
                    last_put.tv_usec += burst_time_us;
                    if (last_put.tv_usec > 1000000) {
                        last_put.tv_usec -= 1000000;
                        last_put.tv_sec++;
                    }
                    delta -= burst_time_us;
                }
                if (budget > 0)
                    break;
                delay.tv_sec = 0;
                delay.tv_nsec = 1000 * (burst_time_us - delta);
                while (delay.tv_nsec > 0)
                    if (nanosleep(&delay, &delay) == 0)
                        break;
            }
        }
    }
    return write(io_fd, buf, n);
}

#else /* ! ADAPTIVE SAVE */

#define RATE_IS_MAX() (0)
#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n))
#define initialize_mbit_rate()

#endif

/* Write exactly 'count' bytes to fd; returns 1 on success, 0 otherwise. */
static inline ssize_t write_exact(int fd, void *buf, size_t count)
{
    if (write(fd, buf, count) != count)
        return 0;
    return 1;
}

static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
                       xc_shadow_op_stats_t *stats, int print)
{
    static struct timeval wall_last;
    static long long      d0_cpu_last;
    static long long      d1_cpu_last;

    struct timeval        wall_now;
    long long             wall_delta;
    long long             d0_cpu_now, d0_cpu_delta;
    long long             d1_cpu_now, d1_cpu_delta;

    gettimeofday(&wall_now, NULL);

    d0_cpu_now = xc_domain_get_cpu_usage(xc_handle, 0, /* FIXME */ 0)/1000;
    d1_cpu_now = xc_domain_get_cpu_usage(xc_handle, domid, /* FIXME */ 0)/1000;

    if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
        DPRINTF("ARRHHH!!\n");

    wall_delta = tv_delta(&wall_now,&wall_last)/1000;

    if (wall_delta == 0) wall_delta = 1;

    d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
    d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;

    if (print)
        DPRINTF(
            "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
            "dirtied %dMb/s %" PRId32 " pages\n",
            wall_delta,
            (int)((d0_cpu_delta*100)/wall_delta),
            (int)((d1_cpu_delta*100)/wall_delta),
            (int)((pages_sent*PAGE_SIZE)/(wall_delta*(1000/8))),
            (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))),
            stats->dirty_count);

#ifdef ADAPTIVE_SAVE
    if (((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate) {
        mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
            + 50;
        if (mbit_rate > MAX_MBIT_RATE)
            mbit_rate = MAX_MBIT_RATE;
    }
#endif

    d0_cpu_last = d0_cpu_now;
    d1_cpu_last = d1_cpu_now;
    wall_last   = wall_now;

    return 0;
}

/*
** Debug aid: repeatedly clean the shadow dirty log and then sample it, so
** that the domain's fault and page-dirtying rates can be observed.
*/
static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn,
                          unsigned long *arr, int runs)
{
    long long start, now;
    xc_shadow_op_stats_t stats;
    int j;

    start = llgettimeofday();

    for (j = 0; j < runs; j++) {
        int i;

        xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                          arr, max_pfn, NULL, 0, NULL);
        DPRINTF("#Flush\n");
        for ( i = 0; i < 40; i++ ) {
            usleep(50000);
            now = llgettimeofday();
            xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
                              NULL, 0, NULL, 0, &stats);

            DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
                    ((now-start)+500)/1000,
                    stats.fault_count, stats.dirty_count);
        }
    }

    return -1;
}

/*
** Ask the domain to suspend (via the supplied callback) and wait until it
** has actually reached the suspended shutdown state.
*/
static int suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
                             int dom, xc_dominfo_t *info,
                             vcpu_guest_context_t *ctxt)
{
    int i = 0;

    if (!(*suspend)(dom)) {
        ERR("Suspend request failed");
        return -1;
    }

 retry:

    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
        ERR("Could not get domain info");
        return -1;
    }

    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
        ERR("Could not get vcpu context");

    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
        return 0; // success

    if (info->paused) {
        // try unpausing domain, wait, and retest
        xc_domain_unpause( xc_handle, dom );

        ERR("Domain was paused. Wait and re-test.");
        usleep(10000);  // 10ms
        goto retry;
    }

    if( ++i < 100 ) {
        ERR("Retry suspend domain.");
        usleep(10000);  // 10ms
        goto retry;
    }

    ERR("Unable to suspend domain.");

    return -1;
}

/*
** During transfer (or in the state file), all page-table pages must be
** converted into a 'canonical' form where references to actual mfns
** are replaced with references to the corresponding pfns.
**
** This function performs the appropriate conversion, taking into account
** which entries do not require canonicalization (in particular, those
** entries which map the virtual address reserved for the hypervisor).
*/
int canonicalize_pagetable(unsigned long type, unsigned long pfn,
                           const void *spage, void *dpage)
{
    int i, pte_last, xen_start, xen_end, race = 0;
    uint64_t pte;

    /*
    ** We need to determine which entries in this page table hold
    ** reserved hypervisor mappings. This depends on the current
    ** page table type as well as the number of paging levels.
    */
    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);

    if (pt_levels == 2 && type == XEN_DOMCTL_PFINFO_L2TAB)
        xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);

    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L3TAB)
        xen_start = L3_PAGETABLE_ENTRIES_PAE;

    /*
    ** in PAE only the L2 mapping the top 1GB contains Xen mappings.
    ** We can spot this by looking for the guest linear mapping which
    ** Xen always ensures is present in that L2. Guests must ensure
    ** that this check will fail for other L2s.
    */
    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L2TAB) {

/* XXX index of the L2 entry in PAE mode which holds the guest LPT */
#define PAE_GLPT_L2ENTRY (495)
        pte = ((uint64_t*)spage)[PAE_GLPT_L2ENTRY];

        if (((pte >> PAGE_SHIFT) & 0x0fffffff) == live_p2m[pfn])
            xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
    }

    if (pt_levels == 4 && type == XEN_DOMCTL_PFINFO_L4TAB) {
        /*
        ** XXX SMH: should compute these from hvirt_start (which we have)
        ** and hvirt_end (which we don't)
        */
        xen_start = 256;
        xen_end   = 272;
    }

    /* Now iterate through the page table, canonicalizing each PTE */
    for (i = 0; i < pte_last; i++ ) {

        unsigned long pfn, mfn;

        if (pt_levels == 2)
            pte = ((uint32_t*)spage)[i];
        else
            pte = ((uint64_t*)spage)[i];

        if (i >= xen_start && i < xen_end)
            pte = 0;

        if (pte & _PAGE_PRESENT) {

            mfn = (pte >> PAGE_SHIFT) & 0xfffffff;
            if (!MFN_IS_IN_PSEUDOPHYS_MAP(mfn)) {
                /* This will happen if the type info is stale which
                   is quite feasible under live migration */
                DPRINTF("PT Race: [%08lx,%d] pte=%llx, mfn=%08lx\n",
                        type, i, (unsigned long long)pte, mfn);
                pfn  = 0;  /* zap it - we'll retransmit this page later */
                race = 1;  /* inform the caller of race; fatal if !live */
            } else
                pfn = mfn_to_pfn(mfn);

            pte &= 0xffffff0000000fffULL;
            pte |= (uint64_t)pfn << PAGE_SHIFT;
        }

        if (pt_levels == 2)
            ((uint32_t*)dpage)[i] = pte;
        else
            ((uint64_t*)dpage)[i] = pte;
    }

    return race;
}

/*
** Map the machine-to-phys (M2P) table, owned by Xen (DOMID_XEN), into this
** process's address space.
*/
static xen_pfn_t *xc_map_m2p(int xc_handle,
                             unsigned long max_mfn,
                             int prot)
{
    struct xen_machphys_mfn_list xmml;
    privcmd_mmap_entry_t *entries;
    unsigned long m2p_chunks, m2p_size;
    xen_pfn_t *m2p;
    xen_pfn_t *extent_start;
    int i, rc;

    m2p_size   = M2P_SIZE(max_mfn);
    m2p_chunks = M2P_CHUNKS(max_mfn);

    xmml.max_extents = m2p_chunks;
    if (!(extent_start = malloc(m2p_chunks * sizeof(xen_pfn_t)))) {
        ERR("failed to allocate space for m2p mfns");
        return NULL;
    }
    set_xen_guest_handle(xmml.extent_start, extent_start);

    if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
        (xmml.nr_extents != m2p_chunks)) {
        ERR("xc_get_m2p_mfns");
        return NULL;
    }

    if ((m2p = mmap(NULL, m2p_size, prot,
                    MAP_SHARED, xc_handle, 0)) == MAP_FAILED) {
        ERR("failed to mmap m2p");
        return NULL;
    }

    if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
        ERR("failed to allocate space for mmap entries");
        return NULL;
    }

    for (i=0; i < m2p_chunks; i++) {
        entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
        entries[i].mfn = extent_start[i];
        entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
    }

    if ((rc = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
                                    entries, m2p_chunks)) < 0) {
        ERR("xc_mmap_foreign_ranges failed (rc = %d)", rc);
        return NULL;
    }

    free(extent_start);
    free(entries);

    return m2p;
}

int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
                  uint32_t max_factor, uint32_t flags, int (*suspend)(int))
{
    xc_dominfo_t info;

    int rc = 1, i, j, last_iter, iter = 0;
    int live  = (flags & XCFLAGS_LIVE);
    int debug = (flags & XCFLAGS_DEBUG);
    int race = 0, sent_last_iter, skip_this_iter;

    /* The new d
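For orientation, here is a hypothetical caller-side sketch (not part of xc_linux_save.c itself) of how a tool-stack might invoke xc_linux_save() with the parameters it defines: passing 0 for max_iters and max_factor falls back to DEF_MAX_ITERS and DEF_MAX_FACTOR, and XCFLAGS_LIVE requests a live save. xc_interface_open() and the exact shape of the suspend callback are assumptions based on the libxc interfaces referenced in the file, not something the fragment above specifies.

/* Hypothetical suspend callback: asks domain 'domid' to suspend (for example
 * via the tool-stack's control channel) and returns nonzero on success. */
static int example_suspend(int domid)
{
    /* ... issue the suspend request for 'domid' here ... */
    return 1;
}

/* Hypothetical driver: 0 for max_iters/max_factor selects the defaults
 * DEF_MAX_ITERS/DEF_MAX_FACTOR inside xc_linux_save(). */
static int example_save(int io_fd, uint32_t domid)
{
    int xc_handle = xc_interface_open();   /* assumed libxc entry point */

    if (xc_handle < 0)
        return -1;

    return xc_linux_save(xc_handle, io_fd, domid, 0, 0,
                         XCFLAGS_LIVE, example_suspend);
}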
/*
    ChibiOS - Copyright (C) 2006..2016 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/**
 * @file    templates/halconf.h
 * @brief   HAL configuration header.
 * @details HAL configuration file; this file allows you to enable or disable
 *          the various device drivers from your application. You may also use
 *          this file in order to override the device drivers' default settings.
 *
 * @addtogroup HAL_CONF
 * @{
 */

#ifndef HALCONF_H
#define HALCONF_H

#include "mcuconf.h"

/**
 * @brief   Enables the TM subsystem.
 */
#if !defined(HAL_USE_TM) || defined(__DOXYGEN__)
#define HAL_USE_TM                  TRUE
#endif

/**
 * @brief   Enables the PAL subsystem.
 */
#if !defined(HAL_USE_PAL) || defined(__DOXYGEN__)
#define HAL_USE_PAL                 TRUE
#endif

/**
 * @brief   Enables the ADC subsystem.
 */
#if !defined(HAL_USE_ADC) || defined(__DOXYGEN__)
#define HAL_USE_ADC                 FALSE
#endif

/**
 * @brief   Enables the CAN subsystem.
 */
#if !defined(HAL_USE_CAN) || defined(__DOXYGEN__)
#define HAL_USE_CAN                 FALSE
#endif

/**
 * @brief   Enables the DAC subsystem.
 */
#if !defined(HAL_USE_DAC) || defined(__DOXYGEN__)
#define HAL_USE_DAC                 FALSE
#endif

/**
 * @brief   Enables the EXT subsystem.
 */
#if !defined(HAL_USE_EXT) || defined(__DOXYGEN__)
#define HAL_USE_EXT                 FALSE
#endif

/**
 * @brief   Enables the GPT subsystem.
 */
#if !defined(HAL_USE_GPT) || defined(__DOXYGEN__)
#define HAL_USE_GPT                 FALSE
#endif

/**
 * @brief   Enables the I2C subsystem.
 */
#if !defined(HAL_USE_I2C) || defined(__DOXYGEN__)
#define HAL_USE_I2C                 FALSE
#endif

/**
 * @brief   Enables the I2S subsystem.
 */
#if !defined(HAL_USE_I2S) || defined(__DOXYGEN__)
#define HAL_USE_I2S                 FALSE
#endif

/**
 * @brief   Enables the ICU subsystem.
 */
#if !defined(HAL_USE_ICU) || defined(__DOXYGEN__)
#define HAL_USE_ICU                 FALSE
#endif

/**
 * @brief   Enables the MAC subsystem.
 */
#if !defined(HAL_USE_MAC) || defined(__DOXYGEN__)
#define HAL_USE_MAC                 FALSE
#endif

/**
 * @brief   Enables the MMC_SPI subsystem.
 */
#if !defined(HAL_USE_MMC_SPI) || defined(__DOXYGEN__)
#define HAL_USE_MMC_SPI             FALSE
#endif

/**
 * @brief   Enables the PWM subsystem.
 */
#if !defined(HAL_USE_PWM) || defined(__DOXYGEN__)
#define HAL_USE_PWM                 FALSE
#endif

/**
 * @brief   Enables the QSPI subsystem.
 */
#if !defined(HAL_USE_QSPI) || defined(__DOXYGEN__)
#define HAL_USE_QSPI                FALSE
#endif

/**
 * @brief   Enables the RTC subsystem.
 */
#if !defined(HAL_USE_RTC) || defined(__DOXYGEN__)
#define HAL_USE_RTC                 FALSE
#endif

/**
 * @brief   Enables the SDC subsystem.
 */
#if !defined(HAL_USE_SDC) || defined(__DOXYGEN__)
#define HAL_USE_SDC                 FALSE
#endif

/**
 * @brief   Enables the SERIAL subsystem.
 */
#if !defined(HAL_USE_SERIAL) || defined(__DOXYGEN__)
#define HAL_USE_SERIAL              FALSE
#endif

/**
 * @brief   Enables the SERIAL over USB subsystem.
 */
#if !defined(HAL_USE_SERIAL_USB) || defined(__DOXYGEN__)
#define HAL_USE_SERIAL_USB          FALSE
#endif

/**
 * @brief   Enables the SPI subsystem.
 */
#if !defined(HAL_USE_SPI) || defined(__DOXYGEN__)
#define HAL_USE_SPI                 TRUE
#endif

/**
 * @brief   Enables the UART subsystem.
 */
#if !defined(HAL_USE_UART) || defined(__DOXYGEN__)
#define HAL_USE_UART                FALSE
#endif

/**
 * @brief   Enables the USB subsystem.
 */
#if !defined(HAL_USE_USB) || defined(__DOXYGEN__)
#define HAL_USE_USB                 FALSE
#endif

/**
 * @brief   Enables the WDG subsystem.
 */
#if !defined(HAL_USE_WDG) || defined(__DOXYGEN__)
#define HAL_USE_WDG                 FALSE
#endif

/*===========================================================================*/
/* ADC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(ADC_USE_WAIT) || defined(__DOXYGEN__)
#define ADC_USE_WAIT                TRUE
#endif

/**
 * @brief   Enables the @p adcAcquireBus() and @p adcReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(ADC_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define ADC_USE_MUTUAL_EXCLUSION    TRUE
#endif

/*===========================================================================*/
/* CAN driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Sleep mode related APIs inclusion switch.
 */
#if !defined(CAN_USE_SLEEP_MODE) || defined(__DOXYGEN__)
#define CAN_USE_SLEEP_MODE          TRUE
#endif

/*===========================================================================*/
/* I2C driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables the mutual exclusion APIs on the I2C bus.
 */
#if !defined(I2C_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define I2C_USE_MUTUAL_EXCLUSION    TRUE
#endif

/*===========================================================================*/
/* MAC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables the zero-copy API.
 */
#if !defined(MAC_USE_ZERO_COPY) || defined(__DOXYGEN__)
#define MAC_USE_ZERO_COPY           FALSE
#endif

/**
 * @brief   Enables an event source for incoming packets.
 */
#if !defined(MAC_USE_EVENTS) || defined(__DOXYGEN__)
#define MAC_USE_EVENTS              TRUE
#endif

/*===========================================================================*/
/* MMC_SPI driver related settings.                                          */
/*===========================================================================*/

/**
 * @brief   Delays insertions.
 * @details If enabled, this option inserts delays into the MMC waiting
 *          routines, releasing some extra CPU time for lower-priority
 *          threads; this may slow down the driver a bit, however.
 *          This option is also recommended if the SPI driver does not
 *          use a DMA channel and heavily loads the CPU.
 */
#if !defined(MMC_NICE_WAITING) || defined(__DOXYGEN__)
#define MMC_NICE_WAITING            TRUE
#endif

/*===========================================================================*/
/* SDC driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Number of initialization attempts before rejecting the card.
 * @note    Attempts are performed at 10 ms intervals.
 */
#if !defined(SDC_INIT_RETRY) || defined(__DOXYGEN__)
#define SDC_INIT_RETRY              100
#endif

/**
 * @brief   Include support for MMC cards.
 * @note    MMC support is not yet implemented so this option must be kept
 *          at @p FALSE.
 */
#if !defined(SDC_MMC_SUPPORT) || defined(__DOXYGEN__)
#define SDC_MMC_SUPPORT             FALSE
#endif

/**
 * @brief   Delays insertions.
 * @details If enabled, this option inserts delays into the MMC waiting
 *          routines, releasing some extra CPU time for lower-priority
 *          threads; this may slow down the driver a bit, however.
 */
#if !defined(SDC_NICE_WAITING) || defined(__DOXYGEN__)
#define SDC_NICE_WAITING            TRUE
#endif

/*===========================================================================*/
/* SERIAL driver related settings.                                           */
/*===========================================================================*/

/**
 * @brief   Default bit rate.
 * @details Configuration parameter; this is the baud rate selected for the
 *          default configuration.
 */
#if !defined(SERIAL_DEFAULT_BITRATE) || defined(__DOXYGEN__)
#define SERIAL_DEFAULT_BITRATE      38400
#endif

/**
 * @brief   Serial buffers size.
 * @details Configuration parameter; you can change the depth of the queue
 *          buffers depending on the requirements of your application.
 * @note    The default is 16 bytes for both the transmission and receive
 *          buffers.
 */
#if !defined(SERIAL_BUFFERS_SIZE) || defined(__DOXYGEN__)
#define SERIAL_BUFFERS_SIZE         16
#endif

/*===========================================================================*/
/* SERIAL_USB driver related settings.                                       */
/*===========================================================================*/

/**
 * @brief   Serial over USB buffers size.
 * @details Configuration parameter; the buffer size must be a multiple of
 *          the USB data endpoint maximum packet size.
 * @note    The default is 256 bytes for both the transmission and receive
 *          buffers.
 */
#if !defined(SERIAL_USB_BUFFERS_SIZE) || defined(__DOXYGEN__)
#define SERIAL_USB_BUFFERS_SIZE     256
#endif

/**
 * @brief   Serial over USB number of buffers.
 * @note    The default is 2 buffers.
 */
#if !defined(SERIAL_USB_BUFFERS_NUMBER) || defined(__DOXYGEN__)
#define SERIAL_USB_BUFFERS_NUMBER   2
#endif

/*===========================================================================*/
/* SPI driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(SPI_USE_WAIT) || defined(__DOXYGEN__)
#define SPI_USE_WAIT                TRUE
#endif

/**
 * @brief   Enables the @p spiAcquireBus() and @p spiReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(SPI_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define SPI_USE_MUTUAL_EXCLUSION    TRUE
#endif

/*===========================================================================*/
/* UART driver related settings.                                             */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(UART_USE_WAIT) || defined(__DOXYGEN__)
#define UART_USE_WAIT               FALSE
#endif

/**
 * @brief   Enables the @p uartAcquireBus() and @p uartReleaseBus() APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(UART_USE_MUTUAL_EXCLUSION) || defined(__DOXYGEN__)
#define UART_USE_MUTUAL_EXCLUSION   FALSE
#endif

/*===========================================================================*/
/* USB driver related settings.                                              */
/*===========================================================================*/

/**
 * @brief   Enables synchronous APIs.
 * @note    Disabling this option saves both code and data space.
 */
#if !defined(USB_USE_WAIT) || defined(__DOXYGEN__)
#define USB_USE_WAIT                FALSE
#endif

#endif /* HALCONF_H */

/** @} */
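A minimal application-side sketch of how a halconf.h like this is consumed, assuming the ChibiOS/RT kernel and a port that provides the SPID1 driver instance; the SPI configuration contents are port-specific and are therefore only declared here, not defined. With HAL_USE_SPI, SPI_USE_WAIT and SPI_USE_MUTUAL_EXCLUSION all set to TRUE above, the blocking transfer and bus-arbitration APIs are available:

#include "ch.h"
#include "hal.h"

/* Port-specific SPI configuration; assumed to be defined elsewhere in the
 * application (its fields depend on the selected platform). */
extern const SPIConfig example_spicfg;

int main(void) {
  static const uint8_t txbuf[4] = {0xDE, 0xAD, 0xBE, 0xEF};

  halInit();                             /* initializes the drivers enabled in halconf.h */
  chSysInit();

  spiStart(&SPID1, &example_spicfg);     /* HAL_USE_SPI == TRUE                */
  spiAcquireBus(&SPID1);                 /* SPI_USE_MUTUAL_EXCLUSION == TRUE   */
  spiSelect(&SPID1);
  spiSend(&SPID1, sizeof txbuf, txbuf);  /* blocking API; SPI_USE_WAIT == TRUE */
  spiUnselect(&SPID1);
  spiReleaseBus(&SPID1);

  while (1) {
    chThdSleepMilliseconds(500);
  }
}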