/* Header names below were lost in extraction; this is the standard include
   list for this CPU identification code and should be checked against the
   build. */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/string.h>
#include <xen/delay.h>
#include <xen/smp.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/xstate.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/setup.h>
#include "cpu.h"

static bool_t __cpuinitdata use_xsave = 1;
boolean_param("xsave", use_xsave);

unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
unsigned int __devinitdata opt_cpuid_mask_edx = ~0u;
integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
unsigned int __devinitdata opt_cpuid_mask_xsave_eax = ~0u;
integer_param("cpuid_mask_xsave_eax", opt_cpuid_mask_xsave_eax);
unsigned int __devinitdata opt_cpuid_mask_ext_ecx = ~0u;
integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

unsigned int paddr_bits __read_mostly = 36;

/*
 * Default host IA32_CR_PAT value to cover all memory types.
 * BIOS usually sets it to 0x07040600070406.
 */
u64 host_pat = 0x050100070406;

static unsigned int __cpuinitdata cleared_caps[NCAPINTS];

void __init setup_clear_cpu_cap(unsigned int cap)
{
        __clear_bit(cap, boot_cpu_data.x86_capability);
        __set_bit(cap, cleared_caps);
}

static void default_init(struct cpuinfo_x86 * c)
{
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        BUG_ON(c->cpuid_level == -1);
        __clear_bit(X86_FEATURE_SEP, c->x86_capability);
}

static struct cpu_dev default_cpu = {
        .c_init   = default_init,
        .c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;

bool_t opt_cpu_info;
boolean_param("cpuinfo", opt_cpu_info);

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while ( *p == ' ' )
                p++;
        if ( p != q ) {
                while ( *p )
                        *q++ = *p++;
                while ( q <= &c->x86_model_id[48] )
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ecx, edx, l2size;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
                if (opt_cpu_info)
                        printk("CPU: L1 I cache %dK (%d bytes/line),"
                               " D cache %dK (%d bytes/line)\n",
                               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24)+(edx>>24);
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        ecx = cpuid_ecx(0x80000006);
        l2size = ecx >> 16;

        c->x86_cache_size = l2size;

        if (opt_cpu_info)
                printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
                       l2size, ecx & 0xFF);
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
        char *v = c->x86_vendor_id;
        int i;
        static int printed;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                if (!early)
                                        this_cpu = cpu_devs[i];
                                return;
                        }
                }
        }
        if (!printed) {
                printed++;
                printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask,
   cache alignment.  The others are not touched to avoid unwanted
   side effects.

   WARNING: this function is only called on the BP.  Don't add code
   here that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;
        u32 cap4, tfms, cap0, misc;

        c->x86_cache_alignment = 32;

        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c, 1);

        cpuid(0x00000001, &tfms, &misc, &cap4, &cap0);
        c->x86 = (tfms >> 8) & 15;
        c->x86_model = (tfms >> 4) & 15;
        if (c->x86 == 0xf)
                c->x86 += (tfms >> 20) & 0xff;
        if (c->x86 >= 0x6)
                c->x86_model += ((tfms >> 16) & 0xF) << 4;
        c->x86_mask = tfms & 15;
        cap0 &= ~cleared_caps[0];
        cap4 &= ~cleared_caps[4];
        if (cap0 & (1<<19))
                c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
        /* Leaf 0x1 capabilities filled in early for Xen. */
        c->x86_capability[0] = cap0;
        c->x86_capability[4] = cap4;
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl, capability, excap, ebx;

        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c, 0);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
        c->x86_capability[0] = capability;
        c->x86_capability[4] = excap;
        c->x86 = (tfms >> 8) & 15;
        c->x86_model = (tfms >> 4) & 15;
        if (c->x86 == 0xf)
                c->x86 += (tfms >> 20) & 0xff;
        if (c->x86 >= 0x6)
                c->x86_model += ((tfms >> 16) & 0xF) << 4;
        c->x86_mask = tfms & 15;
        if ( cpu_has(c, X86_FEATURE_CLFLSH) )
                c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                if ( xlvl >= 0x80000001 ) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if ( xlvl >= 0x80000004 )
                        get_model_name(c); /* Default name */
                if ( xlvl >= 0x80000008 )
                        paddr_bits = cpuid_eax(0x80000008) & 0xff;
        }

        /* Intel-defined flags: level 0x00000007 */
        if ( c->cpuid_level >= 0x00000007 ) {
                u32 dummy;
                cpuid_count(0x00000007, 0, &dummy, &ebx, &dummy, &dummy);
                c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
        }

        early_intel_workaround(c);

#ifdef CONFIG_X86_HT
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->cpuid_level = -1;            /* CPUID not detected */
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_max_cores = 1;
        c->x86_num_siblings = 1;
        c->x86_clflush_size = 0;
        c->phys_proc_id = BAD_APICID;
        c->cpu_core_id = BAD_APICID;
        c->compute_unit_id = BAD_APICID;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After generic identify, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        if (this_cpu->c_identify) {
                this_cpu->c_identify(c);
#ifdef NOISY_CAPS
                printk(KERN_DEBUG "CPU: After vendor identify, caps:");
                for (i = 0; i < NCAPINTS; i++)
                        printk(" %08x", c->x86_capability[i]);
                printk("\n");
#endif
        }

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Initialize xsave/xrstor features */
        if ( !use_xsave )
                clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);

        if ( cpu_has_xsave )
                xstate_init(c == &boot_cpu_data);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */
        for (i = 0; i < NCAPINTS; ++i)
                c->x86_capability[i] &= ~cleared_caps[i];

        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
                /* Last resort... */
                snprintf(c->x86_model_id, sizeof(c->x86_model_id),
                         "%02x/%02x", c->x86_vendor, c->x86_model);
        }

        /* Now the feature flags better reflect actual CPU features! */

#ifdef NOISY_CAPS
        printk(KERN_DEBUG "CPU: After all inits, caps:");
        for (i = 0; i < NCAPINTS; i++)
                printk(" %08x", c->x86_capability[i]);
        printk("\n");
#endif

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                /* AND the already accumulated flags with these */
                for ( i = 0; i < NCAPINTS; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

                mcheck_init(c, 0);
        } else {
                mcheck_init(c, 1);

                mtrr_bp_init();
        }
}

/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}

/* leaf 0xb SMT level */
#define SMT_LEVEL       0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE    0
#define SMT_TYPE        1
#define CORE_TYPE       2

#define LEAFB_SUBTYPE(ecx)          (((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)  ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)     ((ebx) & 0xffff)

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for cpu topology detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;
        unsigned int initial_apicid;

        if ( c->cpuid_level < 0xb )
                return;

        cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

        /* Check if the cpuid leaf 0xb is actually implemented */
        if ( ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE) )
                return;

        set_bit(X86_FEATURE_XTOPOLOGY, c->x86_capability);

        initial_apicid = edx;

        /* Populate HT related information from sub-leaf level 0 */
        core_level_siblings = c->x86_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

        sub_index = 1;
        do {
                cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

                /* Check for the Core type in the implemented sub leaves */
                if ( LEAFB_SUBTYPE(ecx) == CORE_TYPE ) {
                        core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
                        core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
                        break;
                }

                sub_index++;
        } while ( LEAFB_SUBTYPE(ecx) != INVALID_TYPE );

        core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

        c->cpu_core_id = phys_pkg_id(initial_apicid, ht_mask_width)
                & core_select_mask;
        c->phys_proc_id = phys_pkg_id(initial_apicid, core_plus_mask_width);

        c->apicid = phys_pkg_id(initial_apicid, 0);
        c->x86_max_cores = (core_level_siblings / c->x86_num_siblings);

        if ( opt_cpu_info ) {
                printk("CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                if ( c->x86_max_cores > 1 )
                        printk("CPU: Processor Core ID: %d\n",
                               c->cpu_core_id);
        }
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

        if (!cpu_has(c, X86_FEATURE_HT) ||
            cpu_has(c, X86_FEATURE_CMP_LEGACY) ||
            cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        c->x86_num_siblings = (ebx & 0xff0000) >> 16;

        if (c->x86_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (c->x86_num_siblings > 1) {
                if (c->x86_num_siblings > nr_cpu_ids) {
                        printk(KERN_WARNING
                               "CPU: Unsupported number of siblings %d\n",
                               c->x86_num_siblings);
                        c->x86_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(c->x86_num_siblings);
                c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

                if (opt_cpu_info)
                        printk("CPU: Physical Processor ID: %d\n",
                               c->phys_proc_id);

                c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(c->x86_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
                        ((1 << core_bits) - 1);

                if (opt_cpu_info && c->x86_max_cores > 1)
                        printk("CPU: Processor Core ID: %d\n",
                               c->cpu_core_id);
        }
}
#endif

void __cpuinit print_cpu_info(unsigned int cpu)
{
        const struct cpuinfo_x86 *c = cpu_data + cpu;
        const char *vendor = NULL;

        if (!opt_cpu_info)
                return;

        printk("CPU%u: ", cpu);

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else
                vendor = c->x86_vendor_id;

        if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
                printk("%s ", vendor);

        if (!c->x86_model_id[0])
                printk("%d86", c->x86);
        else
                printk("%s", c->x86_model_id);

        printk(" stepping %02x\n", c->x86_mask);
}

static cpumask_t cpu_initialized;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
void __init early_cpu_init(void)
{
        intel_cpu_init();
        amd_init_cpu();
        centaur_init_cpu();
        early_cpu_detect();
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &this_cpu(init_tss);
        struct desc_ptr gdt_desc = {
                .base = (unsigned long)(this_cpu(gdt_table) -
                                        FIRST_RESERVED_GDT_ENTRY),
                .limit = LAST_RESERVED_GDT_BYTE
        };

        if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
        if (opt_cpu_info)
                printk("Initializing CPU#%d\n", cpu);

        if (cpu_has_pat)
                wrmsrl(MSR_IA32_CR_PAT, host_pat);

        /* Install correct page table. */
        write_ptbase(current);

        asm volatile ( "lgdt %0" : : "m" (gdt_desc) );

        /* No nested task. */
        asm volatile ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );

        /* Ensure FPU gets initialised for each domain. */
        stts();

        /* Set up and load the per-CPU TSS and LDT. */
        t->bitmap = IOBMP_INVALID_OFFSET;
        /* Bottom-of-stack must be 16-byte aligned! */
        BUG_ON((get_stack_bottom() & 15) != 0);
        t->rsp0 = get_stack_bottom();
        load_TR();
        asm volatile ( "lldt %%ax" : : "a" (0) );

        /* Clear all 6 debug registers: */
#define CD(register) asm volatile ( "mov %0,%%db" #register : : "r"(0UL) );
        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */ CD(6); CD(7);
#undef CD
}

void cpu_uninit(unsigned int cpu)
{
        cpumask_clear_cpu(cpu, &cpu_initialized);
}

/*
    ChibiOS/RT - Copyright (C) 2006,2007,2008,2009,2010,
                 2011,2012,2013 Giovanni Di Sirio.

    This file is part of ChibiOS/RT.

    ChibiOS/RT is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    ChibiOS/RT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/**
 * @file    STM32/mac_lld.c
 * @brief   STM32 low level MAC driver code.
 *
 * @addtogroup MAC
 * @{
 */

#include <string.h>

#include "ch.h"
#include "hal.h"
#include "mii.h"

#if HAL_USE_MAC || defined(__DOXYGEN__)

/*===========================================================================*/
/* Driver local definitions.                                                 */
/*===========================================================================*/

/* Buffer size expressed in 32-bit words, STM32_MAC_BUFFERS_SIZE rounded up
   to the nearest multiple of four bytes; for example a 1518 bytes buffer
   size becomes 380 words (1520 bytes).*/
#define BUFFER_SIZE ((((STM32_MAC_BUFFERS_SIZE - 1) | 3) + 1) / 4)

/* Optimal MII clock divider value for the current HCLK frequency.*/
#if (STM32_HCLK >= 150000000)
#define MACMIIDR_CR     ETH_MACMIIAR_CR_Div102
#elif (STM32_HCLK >= 100000000)
#define MACMIIDR_CR     ETH_MACMIIAR_CR_Div62
#elif (STM32_HCLK >= 60000000)
#define MACMIIDR_CR     ETH_MACMIIAR_CR_Div42
#elif (STM32_HCLK >= 35000000)
#define MACMIIDR_CR     ETH_MACMIIAR_CR_Div26
#elif (STM32_HCLK >= 20000000)
#define MACMIIDR_CR     ETH_MACMIIAR_CR_Div16
#else
#error "STM32_HCLK below minimum frequency for ETH operations (20MHz)"
#endif

/*===========================================================================*/
/* Driver exported variables.                                                */
/*===========================================================================*/

/**
 * @brief   Ethernet driver 1.
 */
MACDriver ETHD1;

/*===========================================================================*/
/* Driver local variables.                                                   */
/*===========================================================================*/

static const uint8_t default_mac_address[] = {0xAA, 0x55, 0x13,
                                              0x37, 0x01, 0x10};

static stm32_eth_rx_descriptor_t rd[STM32_MAC_RECEIVE_BUFFERS];
static stm32_eth_tx_descriptor_t td[STM32_MAC_TRANSMIT_BUFFERS];

static uint32_t rb[STM32_MAC_RECEIVE_BUFFERS][BUFFER_SIZE];
static uint32_t tb[STM32_MAC_TRANSMIT_BUFFERS][BUFFER_SIZE];

/*===========================================================================*/
/* Driver local functions.                                                   */
/*===========================================================================*/

/**
 * @brief   Writes a PHY register.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 * @param[in] reg       register number
 * @param[in] value     new register value
 */
static void mii_write(MACDriver *macp, uint32_t reg, uint32_t value) {

  ETH->MACMIIDR = value;
  ETH->MACMIIAR = macp->phyaddr | (reg << 6) | MACMIIDR_CR |
                  ETH_MACMIIAR_MW | ETH_MACMIIAR_MB;
  while ((ETH->MACMIIAR & ETH_MACMIIAR_MB) != 0)
    ;
}

/**
 * @brief   Reads a PHY register.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 * @param[in] reg       register number
 *
 * @return              The PHY register content.
 */
static uint32_t mii_read(MACDriver *macp, uint32_t reg) {

  ETH->MACMIIAR = macp->phyaddr | (reg << 6) | MACMIIDR_CR | ETH_MACMIIAR_MB;
  while ((ETH->MACMIIAR & ETH_MACMIIAR_MB) != 0)
    ;
  return ETH->MACMIIDR;
}
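
/* For example, the identifier of the attached PHY can be assembled using
   the helpers above, assuming macp->phyaddr has already been initialized:

     uint32_t id = (mii_read(macp, MII_PHYSID1) << 16) |
                   mii_read(macp, MII_PHYSID2);

   This is essentially the check that mii_find_phy() performs while
   scanning the possible PHY addresses.*/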

#if !defined(BOARD_PHY_ADDRESS)
/**
 * @brief   PHY address detection.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 */
static void mii_find_phy(MACDriver *macp) {
  uint32_t i;

#if STM32_MAC_PHY_TIMEOUT > 0
  halrtcnt_t start = halGetCounterValue();
  halrtcnt_t timeout  = start + MS2RTT(STM32_MAC_PHY_TIMEOUT);
  while (halIsCounterWithin(start, timeout)) {
#endif
    for (i = 0; i < 31; i++) {
      macp->phyaddr = i << 11;
      ETH->MACMIIDR = (i << 6) | MACMIIDR_CR;
      if ((mii_read(macp, MII_PHYSID1) == (BOARD_PHY_ID >> 16)) &&
          ((mii_read(macp, MII_PHYSID2) & 0xFFF0) == (BOARD_PHY_ID & 0xFFF0))) {
        return;
      }
    }
#if STM32_MAC_PHY_TIMEOUT > 0
  }
#endif
  /* Wrong or defective board.*/
  chSysHalt();
}
#endif

/**
 * @brief   MAC address setup.
 *
 * @param[in] p         pointer to a six bytes buffer containing the MAC
 *                      address
 */
static void mac_lld_set_address(const uint8_t *p) {

  /* MAC address configuration, only a single address comparator is used,
     hash table not used.*/
  ETH->MACA0HR   = ((uint32_t)p[5] << 8) |
                   ((uint32_t)p[4] << 0);
  ETH->MACA0LR   = ((uint32_t)p[3] << 24) |
                   ((uint32_t)p[2] << 16) |
                   ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[0] << 0);
  ETH->MACA1HR   = 0x0000FFFF;
  ETH->MACA1LR   = 0xFFFFFFFF;
  ETH->MACA2HR   = 0x0000FFFF;
  ETH->MACA2LR   = 0xFFFFFFFF;
  ETH->MACA3HR   = 0x0000FFFF;
  ETH->MACA3LR   = 0xFFFFFFFF;
  ETH->MACHTHR   = 0;
  ETH->MACHTLR   = 0;
}
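
/* As a worked example, the default address AA:55:13:37:01:10 defined above
   is loaded as MACA0HR = 0x00001001 and MACA0LR = 0x371355AA, the first
   address byte (0xAA) occupying the least significant byte of MACA0LR.*/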

/*===========================================================================*/
/* Driver interrupt handlers.                                                */
/*===========================================================================*/

CH_IRQ_HANDLER(ETH_IRQHandler) {
  uint32_t dmasr;

  CH_IRQ_PROLOGUE();

  dmasr = ETH->DMASR;
  ETH->DMASR = dmasr; /* Clear status bits.*/

  if (dmasr & ETH_DMASR_RS) {
    /* Data Received.*/
    chSysLockFromIsr();
    chSemResetI(&ETHD1.rdsem, 0);
#if MAC_USE_EVENTS
    chEvtBroadcastI(&ETHD1.rdevent);
#endif
    chSysUnlockFromIsr();
  }

  if (dmasr & ETH_DMASR_TS) {
    /* Data Transmitted.*/
    chSysLockFromIsr();
    chSemResetI(&ETHD1.tdsem, 0);
    chSysUnlockFromIsr();
  }

  CH_IRQ_EPILOGUE();
}
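
/* Note, the semaphores are reset rather than signaled because any number
   of descriptors could have been returned by the DMA and any number of
   threads could be waiting, a reset wakes up all the waiters which then
   retry the descriptor acquisition.*/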

/*===========================================================================*/
/* Driver exported functions.                                                */
/*===========================================================================*/

/**
 * @brief   Low level MAC initialization.
 *
 * @notapi
 */
void mac_lld_init(void) {
  unsigned i;

  macObjectInit(&ETHD1);
  ETHD1.link_up = FALSE;

  /* Descriptor tables are initialized in chained mode, note that the first
     word is not initialized here but in mac_lld_start().*/
  for (i = 0; i < STM32_MAC_RECEIVE_BUFFERS; i++) {
    rd[i].rdes1 = STM32_RDES1_RCH | STM32_MAC_BUFFERS_SIZE;
    rd[i].rdes2 = (uint32_t)rb[i];
    rd[i].rdes3 = (uint32_t)&rd[(i + 1) % STM32_MAC_RECEIVE_BUFFERS];
  }
  for (i = 0; i < STM32_MAC_TRANSMIT_BUFFERS; i++) {
    td[i].tdes1 = 0;
    td[i].tdes2 = (uint32_t)tb[i];
    td[i].tdes3 = (uint32_t)&td[(i + 1) % STM32_MAC_TRANSMIT_BUFFERS];
  }

  /* Selection of the RMII or MII mode based on info exported by board.h.*/
#if defined(STM32F10X_CL)
#if defined(BOARD_PHY_RMII)
  AFIO->MAPR |= AFIO_MAPR_MII_RMII_SEL;
#else
  AFIO->MAPR &= ~AFIO_MAPR_MII_RMII_SEL;
#endif
#elif defined(STM32F2XX) || defined(STM32F4XX)
#if defined(BOARD_PHY_RMII)
  SYSCFG->PMC |= SYSCFG_PMC_MII_RMII_SEL;
#else
  SYSCFG->PMC &= ~SYSCFG_PMC_MII_RMII_SEL;
#endif
#else
#error "unsupported STM32 platform for MAC driver"
#endif

  /* Reset of the MAC core.*/
  rccResetETH();

  /* MAC clocks temporary activation.*/
  rccEnableETH(FALSE);

  /* PHY address setup.*/
#if defined(BOARD_PHY_ADDRESS)
  ETHD1.phyaddr = BOARD_PHY_ADDRESS << 11;
#else
  mii_find_phy(&ETHD1);
#endif

#if defined(BOARD_PHY_RESET)
  /* PHY board-specific reset procedure.*/
  BOARD_PHY_RESET();
#else
  /* PHY soft reset procedure.*/
  mii_write(&ETHD1, MII_BMCR, BMCR_RESET);
#if defined(BOARD_PHY_RESET_DELAY)
  halPolledDelay(BOARD_PHY_RESET_DELAY);
#endif
  while (mii_read(&ETHD1, MII_BMCR) & BMCR_RESET)
    ;
#endif

#if STM32_MAC_ETH1_CHANGE_PHY_STATE
  /* PHY in power down mode until the driver is started.*/
  mii_write(&ETHD1, MII_BMCR, mii_read(&ETHD1, MII_BMCR) | BMCR_PDOWN);
#endif

  /* MAC clocks stopped again.*/
  rccDisableETH(FALSE);
}
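
/* The initialization above depends on a few optional settings exported by
   board.h, a hypothetical board definition could contain, for example:

     #define BOARD_PHY_RMII
     #define BOARD_PHY_ADDRESS          1
     #define BOARD_PHY_ID               MII_DP83848I_ID

   When BOARD_PHY_ADDRESS is not defined then BOARD_PHY_ID must be, because
   mii_find_phy() matches it while scanning the bus.*/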

/**
 * @brief   Configures and activates the MAC peripheral.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 *
 * @notapi
 */
void mac_lld_start(MACDriver *macp) {
  unsigned i;

  /* Resets the state of all descriptors.*/
  for (i = 0; i < STM32_MAC_RECEIVE_BUFFERS; i++)
    rd[i].rdes0 = STM32_RDES0_OWN;
  macp->rxptr = (stm32_eth_rx_descriptor_t *)rd;
  for (i = 0; i < STM32_MAC_TRANSMIT_BUFFERS; i++)
    td[i].tdes0 = STM32_TDES0_TCH;
  macp->txptr = (stm32_eth_tx_descriptor_t *)td;

  /* MAC clocks activation and commanded reset procedure.*/
  rccEnableETH(FALSE);
  ETH->DMABMR |= ETH_DMABMR_SR;
  while (ETH->DMABMR & ETH_DMABMR_SR)
    ;

  /* ISR vector enabled.*/
  nvicEnableVector(ETH_IRQn,
                   CORTEX_PRIORITY_MASK(STM32_MAC_ETH1_IRQ_PRIORITY));

#if STM32_MAC_ETH1_CHANGE_PHY_STATE
  /* PHY in power up mode.*/
  mii_write(macp, MII_BMCR, mii_read(macp, MII_BMCR) & ~BMCR_PDOWN);
#endif

  /* MAC configuration.*/
  ETH->MACFFR    = 0;
  ETH->MACFCR    = 0;
  ETH->MACVLANTR = 0;

  /* MAC address setup.*/
  if (macp->config->mac_address == NULL)
    mac_lld_set_address(default_mac_address);
  else
    mac_lld_set_address(macp->config->mac_address);

  /* Transmitter and receiver enabled.
     Note that the complete setup of the MAC is performed when the link
     status is detected.*/
#if STM32_MAC_IP_CHECKSUM_OFFLOAD
  ETH->MACCR = ETH_MACCR_IPCO | ETH_MACCR_RE | ETH_MACCR_TE;
#else
  ETH->MACCR =                  ETH_MACCR_RE | ETH_MACCR_TE;
#endif

  /* DMA configuration:
     Descriptor chains pointers.*/
  ETH->DMARDLAR = (uint32_t)rd;
  ETH->DMATDLAR = (uint32_t)td;

  /* Pending status bits are cleared by writing them back, then the
     required interrupt sources are enabled.*/
  ETH->DMASR    = ETH->DMASR;
  ETH->DMAIER   = ETH_DMAIER_NISE | ETH_DMAIER_RIE | ETH_DMAIER_TIE;

  /* DMA general settings.*/
  ETH->DMABMR   = ETH_DMABMR_AAB | ETH_DMABMR_RDP_1Beat | ETH_DMABMR_PBL_1Beat;

  /* Transmit FIFO flush.*/
  ETH->DMAOMR   = ETH_DMAOMR_FTF;
  while (ETH->DMAOMR & ETH_DMAOMR_FTF)
    ;

  /* DMA final configuration and start.*/
  ETH->DMAOMR   = ETH_DMAOMR_DTCEFD | ETH_DMAOMR_RSF | ETH_DMAOMR_TSF |
                  ETH_DMAOMR_ST | ETH_DMAOMR_SR;
}
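
/* Usage sketch, not part of the driver: the peripheral is normally
   activated through the high-level MAC API, a NULL mac_address field in
   the configuration selects the default_mac_address defined above:

     static const MACConfig mac_config = {NULL};

     macStart(&ETHD1, &mac_config);
*/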

/**
 * @brief   Deactivates the MAC peripheral.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 *
 * @notapi
 */
void mac_lld_stop(MACDriver *macp) {

  if (macp->state != MAC_STOP) {
#if STM32_MAC_ETH1_CHANGE_PHY_STATE
    /* PHY in power down mode until the driver is restarted.*/
    mii_write(macp, MII_BMCR, mii_read(macp, MII_BMCR) | BMCR_PDOWN);
#endif

    /* MAC and DMA stopped.*/
    ETH->MACCR    = 0;
    ETH->DMAOMR   = 0;
    ETH->DMAIER   = 0;
    ETH->DMASR    = ETH->DMASR;

    /* MAC clocks stopped.*/
    rccDisableETH(FALSE);

    /* ISR vector disabled.*/
    nvicDisableVector(ETH_IRQn);
  }
}

/**
 * @brief   Returns a transmission descriptor.
 * @details One of the available transmission descriptors is locked and
 *          returned.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 * @param[out] tdp      pointer to a @p MACTransmitDescriptor structure
 * @return              The operation status.
 * @retval RDY_OK       the descriptor has been obtained.
 * @retval RDY_TIMEOUT  descriptor not available.
 *
 * @notapi
 */
msg_t mac_lld_get_transmit_descriptor(MACDriver *macp,
                                      MACTransmitDescriptor *tdp) {
  stm32_eth_tx_descriptor_t *tdes;

  if (!macp->link_up)
    return RDY_TIMEOUT;

  chSysLock();

  /* Get Current TX descriptor.*/
  tdes = macp->txptr;

  /* Ensure that descriptor isn't owned by the Ethernet DMA or locked by
     another thread.*/
  if (tdes->tdes0 & (STM32_TDES0_OWN | STM32_TDES0_LOCKED)) {
    chSysUnlock();
    return RDY_TIMEOUT;
  }

  /* Marks the current descriptor as locked using a reserved bit.*/
  tdes->tdes0 |= STM32_TDES0_LOCKED;

  /* Next TX descriptor to use.*/
  macp->txptr = (stm32_eth_tx_descriptor_t *)tdes->tdes3;

  chSysUnlock();

  /* Set the buffer size and configuration.*/
  tdp->offset   = 0;
  tdp->size     = STM32_MAC_BUFFERS_SIZE;
  tdp->physdesc = tdes;

  return RDY_OK;
}

/**
 * @brief   Releases a transmit descriptor and starts the transmission of the
 *          enqueued data as a single frame.
 *
 * @param[in] tdp       the pointer to the @p MACTransmitDescriptor structure
 *
 * @notapi
 */
void mac_lld_release_transmit_descriptor(MACTransmitDescriptor *tdp) {

  chDbgAssert(!(tdp->physdesc->tdes0 & STM32_TDES0_OWN),
              "mac_lld_release_transmit_descriptor(), #1",
              "attempt to release descriptor already owned by DMA");

  chSysLock();

  /* Unlocks the descriptor and returns it to the DMA engine.*/
  tdp->physdesc->tdes1 = tdp->offset;
  tdp->physdesc->tdes0 = STM32_TDES0_CIC(STM32_MAC_IP_CHECKSUM_OFFLOAD) |
                         STM32_TDES0_IC | STM32_TDES0_LS | STM32_TDES0_FS |
                         STM32_TDES0_TCH | STM32_TDES0_OWN;

  /* If the DMA engine is stalled then a restart request is issued.*/
  if ((ETH->DMASR & ETH_DMASR_TPS) == ETH_DMASR_TPS_Suspended) {
    ETH->DMASR   = ETH_DMASR_TBUS;
    ETH->DMATPDR = ETH_DMASR_TBUS; /* Any value is OK.*/
  }

  chSysUnlock();
}

/**
 * @brief   Returns a receive descriptor.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 * @param[out] rdp      pointer to a @p MACReceiveDescriptor structure
 * @return              The operation status.
 * @retval RDY_OK       the descriptor has been obtained.
 * @retval RDY_TIMEOUT  descriptor not available.
 *
 * @notapi
 */
msg_t mac_lld_get_receive_descriptor(MACDriver *macp,
                                     MACReceiveDescriptor *rdp) {
  stm32_eth_rx_descriptor_t *rdes;

  chSysLock();

  /* Get Current RX descriptor.*/
  rdes = macp->rxptr;

  /* Iterates through received frames until a valid one is found, invalid
     frames are discarded.*/
  while (!(rdes->rdes0 & STM32_RDES0_OWN)) {
    if (!(rdes->rdes0 & (STM32_RDES0_AFM | STM32_RDES0_ES))
#if STM32_MAC_IP_CHECKSUM_OFFLOAD
        && (rdes->rdes0 & STM32_RDES0_FT)
        && !(rdes->rdes0 & (STM32_RDES0_IPHCE | STM32_RDES0_PCE))
#endif
        && (rdes->rdes0 & STM32_RDES0_FS) && (rdes->rdes0 & STM32_RDES0_LS)) {
      /* Found a valid one, the length field in RDES0 includes the four
         CRC bytes which are subtracted from the reported size.*/
      rdp->offset   = 0;
      rdp->size     = ((rdes->rdes0 & STM32_RDES0_FL_MASK) >> 16) - 4;
      rdp->physdesc = rdes;
      macp->rxptr   = (stm32_eth_rx_descriptor_t *)rdes->rdes3;

      chSysUnlock();
      return RDY_OK;
    }
    /* Invalid frame found, purging.*/
    rdes->rdes0 = STM32_RDES0_OWN;
    rdes = (stm32_eth_rx_descriptor_t *)rdes->rdes3;
  }

  /* Next descriptor to check.*/
  macp->rxptr = rdes;

  chSysUnlock();
  return RDY_TIMEOUT;
}

/**
 * @brief   Releases a receive descriptor.
 * @details The descriptor and its buffer are made available for more incoming
 *          frames.
 *
 * @param[in] rdp       the pointer to the @p MACReceiveDescriptor structure
 *
 * @notapi
 */
void mac_lld_release_receive_descriptor(MACReceiveDescriptor *rdp) {

  chDbgAssert(!(rdp->physdesc->rdes0 & STM32_RDES0_OWN),
              "mac_lld_release_receive_descriptor(), #1",
              "attempt to release descriptor already owned by DMA");

  chSysLock();

  /* Give buffer back to the Ethernet DMA.*/
  rdp->physdesc->rdes0 = STM32_RDES0_OWN;

  /* If the DMA engine is stalled then a restart request is issued.*/
  if ((ETH->DMASR & ETH_DMASR_RPS) == ETH_DMASR_RPS_Suspended) {
    ETH->DMASR   = ETH_DMASR_RBUS;
    ETH->DMARPDR = ETH_DMASR_RBUS; /* Any value is OK.*/
  }

  chSysUnlock();
}

/**
 * @brief   Updates and returns the link status.
 *
 * @param[in] macp      pointer to the @p MACDriver object
 * @return              The link status.
 * @retval TRUE         if the link is active.
 * @retval FALSE        if the link is down.
 *
 * @notapi
 */
bool_t mac_lld_poll_link_status(MACDriver *macp) {
  uint32_t maccr, bmsr, bmcr;

  maccr = ETH->MACCR;

  /* PHY CR and SR registers read, the status register is read twice
     because some of its bits are latched and the first read can return
     stale values.*/
  (void)mii_read(macp, MII_BMSR);
  bmsr = mii_read(macp, MII_BMSR);
  bmcr = mii_read(macp, MII_BMCR);

  /* Check on auto-negotiation mode.*/
  if (bmcr & BMCR_ANENABLE) {
    uint32_t lpa;

    /* Auto-negotiation must be finished without faults and link established.*/
    if ((bmsr & (BMSR_LSTATUS | BMSR_RFAULT | BMSR_ANEGCOMPLETE)) !=
        (BMSR_LSTATUS | BMSR_ANEGCOMPLETE))
      return macp->link_up = FALSE;

    /* Auto-negotiation enabled, checks the LPA register.*/
    lpa = mii_read(macp, MII_LPA);

    /* Check on link speed.*/
    if (lpa & (LPA_100HALF | LPA_100FULL | LPA_100BASE4))
      maccr |= ETH_MACCR_FES;
    else
      maccr &= ~ETH_MACCR_FES;

    /* Check on link mode.*/
    if (lpa & (LPA_10FULL | LPA_100FULL))
      maccr |= ETH_MACCR_DM;
    else
      maccr &= ~ETH_MACCR_DM;
  }
  else {
    /* Link must be established.*/
    if (!(bmsr & BMSR_LSTATUS))
      return macp->link_up = FALSE;

    /* Check on link speed.*/
    if (bmcr & BMCR_SPEED100)
      maccr |= ETH_MACCR_FES;
    else
      maccr &= ~ETH_MACCR_FES;

    /* Check on link mode.*/
    if (bmcr & BMCR_FULLDPLX)
      maccr |= ETH_MACCR_DM;
    else
      maccr &= ~ETH_MACCR_DM;
  }

  /* Changes the mode in the MAC.*/
  ETH->MACCR = maccr;

  /* Returns the link status.*/
  return macp->link_up = TRUE;
}
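
/* Usage sketch, not part of the driver: the link status must be polled
   periodically from a thread because transmit descriptors are refused
   with RDY_TIMEOUT while the link is down:

     while (!macPollLinkStatus(&ETHD1))
       chThdSleepMilliseconds(500);
*/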

/**
 * @brief   Writes to a transmit descriptor's stream.
 *
 * @param[in] tdp       pointer to a @p MACTransmitDescriptor structure
 * @param[in] buf       pointer to the buffer containing the data to be
 *                      written
 * @param[in] size      number of bytes to be written
 * @return              The number of bytes written into the descriptor's
 *                      stream, this value can be less than the amount
 *                      specified in the parameter @p size if the maximum
 *                      frame size is reached.
 *
 * @notapi
 */
size_t mac_lld_write_transmit_descriptor(MACTransmitDescriptor *tdp,
                                         uint8_t *buf,
                                         size_t size) {

  chDbgAssert(!(tdp->physdesc->tdes0 & STM32_TDES0_OWN),
              "mac_lld_write_transmit_descriptor(), #1",
              "attempt to write descriptor already owned by DMA");

  if (size > tdp->size - tdp->offset)
    size = tdp->size - tdp->offset;

  if (size > 0) {
    memcpy((uint8_t *)(tdp->physdesc->tdes2) + tdp->offset, buf, size);
    tdp->offset += size;
  }
  return size;
}
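
/* Usage sketch, not part of the driver: a complete transmission through
   the high-level MAC API, assuming a hypothetical buffer "frame" holding
   "len" bytes, with len not exceeding STM32_MAC_BUFFERS_SIZE:

     MACTransmitDescriptor td;

     if (macWaitTransmitDescriptor(&ETHD1, &td, MS2ST(50)) == RDY_OK) {
       macWriteTransmitDescriptor(&td, frame, len);
       macReleaseTransmitDescriptor(&td);
     }
*/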

/**
 * @brief   Reads from a receive descriptor's stream.
 *
 * @param[in] rdp       pointer to a @p MACReceiveDescriptor structure
 * @param[in] buf       pointer to the buffer that will receive the read data
 * @param[in] size      number of bytes to be read
 * @return              The number of bytes read from the descriptor's
 *                      stream, this value can be less than the amount
 *                      specified in the parameter @p size if there are
 *                      no more bytes to read.
 *
 * @notapi
 */
size_t mac_lld_read_receive_descriptor(MACReceiveDescriptor *rdp,
                                       uint8_t *buf,
                                       size_t size) {

  chDbgAssert(!(rdp->physdesc->rdes0 & STM32_RDES0_OWN),
              "mac_lld_read_receive_descriptor(), #1",
              "attempt to read descriptor already owned by DMA");

  if (size > rdp->size - rdp->offset)
    size = rdp->size - rdp->offset;

  if (size > 0) {
    memcpy(buf, (uint8_t *)(rdp->physdesc->rdes2) + rdp->offset, size);
    rdp->offset += size;
  }
  return size;
}
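
/* Usage sketch, not part of the driver: a complete reception through the
   high-level MAC API, assuming a hypothetical buffer "frame" of
   STM32_MAC_BUFFERS_SIZE bytes, n receives the payload length:

     MACReceiveDescriptor rd;
     size_t n;

     if (macWaitReceiveDescriptor(&ETHD1, &rd, MS2ST(50)) == RDY_OK) {
       n = macReadReceiveDescriptor(&rd, frame, sizeof frame);
       macReleaseReceiveDescriptor(&rd);
     }
*/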

#if MAC_USE_ZERO_COPY || defined(__DOXYGEN__)
/**
 * @brief   Returns a pointer to the next transmit buffer in the descriptor
 *          chain.
 * @note    The API guarantees that enough buffers can be requested to fill
 *          a whole frame.
 *
 * @param[in] tdp       pointer to a @p MACTransmitDescriptor structure
 * @param[in] size      size of the requested buffer. Specify the frame size
 *                      on the first call then scale the value down subtracting
 *                      the amount of data already copied into the previous
 *                      buffers.
 * @param[out] sizep    pointer to variable receiving the buffer size, it is
 *                      zero when the last buffer has already been returned.
 *                      Note that a returned size lower than the amount
 *                      requested means that more buffers must be requested
 *                      in order to fill the frame data entirely.
 * @return              Pointer to the returned buffer.
 * @retval NULL         if the buffer chain has been entirely scanned.
 *
 * @notapi
 */
uint8_t *mac_lld_get_next_transmit_buffer(MACTransmitDescriptor *tdp,
                                          size_t size,
                                          size_t *sizep) {

  if (tdp->offset == 0) {
    *sizep      = tdp->size;
    tdp->offset = size;
    return (uint8_t *)tdp->physdesc->tdes2;
  }
  *sizep = 0;
  return NULL;
}

/**
 * @brief   Returns a pointer to the next receive buffer in the descriptor
 *          chain.
 * @note    The API guarantees that the descriptor chain contains a whole
 *          frame.
 *
 * @param[in] rdp       pointer to a @p MACReceiveDescriptor structure
 * @param[out] sizep    pointer to variable receiving the buffer size, it is
 *                      zero when the last buffer has already been returned.
 * @return              Pointer to the returned buffer.
 * @retval NULL         if the buffer chain has been entirely scanned.
 *
 * @notapi
 */
const uint8_t *mac_lld_get_next_receive_buffer(MACReceiveDescriptor *rdp,
                                               size_t *sizep) {

  if (rdp->size > 0) {
    *sizep      = rdp->size;
    rdp->offset = rdp->size;
    rdp->size   = 0;
    return (uint8_t *)rdp->physdesc->rdes2;
  }
  *sizep = 0;
  return NULL;
}
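
/* Usage sketch, not part of the driver: zero-copy transmission of "len"
   bytes, with the single-buffer descriptors used by this driver the
   buffer loop degenerates to a single iteration and the returned offset
   already covers the whole frame:

     MACTransmitDescriptor td;
     size_t size;
     uint8_t *p;

     if (macWaitTransmitDescriptor(&ETHD1, &td, MS2ST(50)) == RDY_OK) {
       p = mac_lld_get_next_transmit_buffer(&td, len, &size);
       ... up to "size" bytes are built directly into p ...
       macReleaseTransmitDescriptor(&td);
     }
*/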
#endif /* MAC_USE_ZERO_COPY */

#endif /* HAL_USE_MAC */

/** @} */