/* * Copyright (C) 2007 Advanced Micro Devices, Inc. * Author: Leo Duran * Author: Wei Wang - adapted to xen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include extern unsigned long amd_iommu_page_entries; extern unsigned short ivrs_bdf_entries; extern struct ivrs_mappings *ivrs_mappings; extern unsigned short last_bdf; extern int ioapic_bdf[MAX_IO_APICS]; extern void *shared_intremap_table; static void add_ivrs_mapping_entry( u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu) { u8 sys_mgt, lint1_pass, lint0_pass, nmi_pass, ext_int_pass, init_pass; ASSERT( ivrs_mappings != NULL ); /* setup requestor id */ ivrs_mappings[bdf].dte_requestor_id = alias_id; /* override flags for range of devices */ sys_mgt = get_field_from_byte(flags, AMD_IOMMU_ACPI_SYS_MGT_MASK, AMD_IOMMU_ACPI_SYS_MGT_SHIFT); lint1_pass = get_field_from_byte(flags, AMD_IOMMU_ACPI_LINT1_PASS_MASK, AMD_IOMMU_ACPI_LINT1_PASS_SHIFT); lint0_pass = get_field_from_byte(flags, AMD_IOMMU_ACPI_LINT0_PASS_MASK, AMD_IOMMU_ACPI_LINT0_PASS_SHIFT); nmi_pass = get_field_from_byte(flags, AMD_IOMMU_ACPI_NMI_PASS_MASK, AMD_IOMMU_ACPI_NMI_PASS_SHIFT); ext_int_pass = get_field_from_byte(flags, AMD_IOMMU_ACPI_EINT_PASS_MASK, AMD_IOMMU_ACPI_EINT_PASS_SHIFT); init_pass = get_field_from_byte(flags, AMD_IOMMU_ACPI_INIT_PASS_MASK, 
AMD_IOMMU_ACPI_INIT_PASS_SHIFT); ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt; ivrs_mappings[bdf].dte_lint1_pass = lint1_pass; ivrs_mappings[bdf].dte_lint0_pass = lint0_pass; ivrs_mappings[bdf].dte_nmi_pass = nmi_pass; ivrs_mappings[bdf].dte_ext_int_pass = ext_int_pass; ivrs_mappings[bdf].dte_init_pass = init_pass; if (ivrs_mappings[alias_id].intremap_table == NULL ) { /* allocate per-device interrupt remapping table */ if ( amd_iommu_perdev_intremap ) ivrs_mappings[alias_id].intremap_table = amd_iommu_alloc_intremap_table(); else { if ( shared_intremap_table == NULL ) shared_intremap_table = amd_iommu_alloc_intremap_table(); ivrs_mappings[alias_id].intremap_table = shared_intremap_table; } } /* assgin iommu hardware */ ivrs_mappings[bdf].iommu = iommu; } static struct amd_iommu * __init find_iommu_from_bdf_cap( u16 bdf, u8 cap_offset) { struct amd_iommu *iommu; for_each_amd_iommu ( iommu ) if ( (iommu->bdf == bdf) && (iommu->cap_offset == cap_offset) ) return iommu; return NULL; } static void __init reserve_iommu_exclusion_range( struct amd_iommu *iommu, uint64_t base, uint64_t limit) { /* need to extend exclusion range? */ if ( iommu->exclusion_enable ) { if ( iommu->exclusion_base < base ) base = iommu->exclusion_base; if ( iommu->exclusion_limit > limit ) limit = iommu->exclusion_limit; } iommu->exclusion_enable = IOMMU_CONTROL_ENABLED; iommu->exclusion_base = base; iommu->exclusion_limit = limit; } static void __init reserve_iommu_exclusion_range_all( struct amd_iommu *iommu, unsigned long base, unsigned long limit) { reserve_iommu_exclusion_range(iommu, base, limit); iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED; } static void __init reserve_unity_map_for_device( u16 bdf, unsigned long base, unsigned long length, u8 iw, u8 ir) { unsigned long old_top, new_top; /* need to extend unity-mapped range? 
*/ if ( ivrs_mappings[bdf].unity_map_enable ) { old_top = ivrs_mappings[bdf].addr_range_start + ivrs_mappings[bdf].addr_range_length; new_top = base + length; if ( old_top > new_top ) new_top = old_top; if ( ivrs_mappings[bdf].addr_range_start < base ) base = ivrs_mappings[bdf].addr_range_start; length = new_top - base; } /* extend r/w permissioms and keep aggregate */ ivrs_mappings[bdf].write_permission = iw; ivrs_mappings[bdf].read_permission = ir; ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED; ivrs_mappings[bdf].addr_range_start = base; ivrs_mappings[bdf].addr_range_length = length; } static int __init register_exclusion_range_for_all_devices( unsigned long base, unsigned long limit, u8 iw, u8 ir) { unsigned long range_top, iommu_top, length; struct amd_iommu *iommu; u16 bdf; /* is part of exclusion range inside of IOMMU virtual address space? */ /* note: 'limit' parameter is assumed to be page-aligned */ range_top = limit + PAGE_SIZE; iommu_top = max_page * PAGE_SIZE; if ( base < iommu_top ) { if ( range_top > iommu_top ) range_top = iommu_top; length = range_top - base; /* reserve r/w unity-mapped page entries for devices */ /* note: these entries are part of the exclusion range */ for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) reserve_unity_map_for_device(bdf, base, length, iw, ir); /* push 'base' just outside of virtual address space */ base = iommu_top; } /* register IOMMU exclusion range settings */ if ( limit >= iommu_top ) { for_each_amd_iommu( iommu ) reserve_iommu_exclusion_range_all(iommu, base, limit); } return 0; } static int __init register_exclusion_range_for_device( u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir) { unsigned long range_top, iommu_top, length; struct amd_iommu *iommu; u16 req; iommu = find_iommu_for_device(bdf); if ( !iommu ) { AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf); return -ENODEV; } req = ivrs_mappings[bdf].dte_requestor_id; /* note: 'limit' parameter is assumed to be 
page-aligned */ range_top = limit + PAGE_SIZE; iommu_top = max_page * PAGE_SIZE; if ( base < iommu_top ) { if ( range_top > iommu_top ) range_top = iommu_top; length = range_top - base; /* reserve unity-mapped page entries for device */ /* note: these entries are part of the exclusion range */ reserve_unity_map_for_device(bdf, base, length, iw, ir); reserve_unity_map_for_device(req, base, length, iw, ir); /* push 'base' just outside of virtual address space */ base = iommu_top; } /* register IOMMU exclusion range settings for device */ if ( limit >= iommu_top ) { reserve_iommu_exclusion_range(iommu, base, limit); ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED; ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED; } return 0; } static int __init register_exclusion_range_for_iommu_devices( struct amd_iommu *iommu, unsigned long base, unsigned long limit, u8 iw, u8 ir) { unsigned long range_top, iommu_top, length; u16 bdf, req; /* is part of exclusion range inside of IOMMU virtual address space? 
*/ /* note: 'limit' parameter is assumed to be page-aligned */ range_top = limit + PAGE_SIZE; iommu_top = max_page * PAGE_SIZE; if ( base < iommu_top ) { if ( range_top > iommu_top ) range_top = iommu_top; length = range_top - base; /* reserve r/w unity-mapped page entries for devices */ /* note: these entries are part of the exclusion range */ for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) { if ( iommu == find_iommu_for_device(bdf) ) { reserve_unity_map_for_device(bdf, base, length, iw, ir); req = ivrs_mappings[bdf].dte_requestor_id; reserve_unity_map_for_device(req, base, length, iw, ir); } } /* push 'base' just outside of virtual address space */ base = iommu_top; } /* register IOMMU exclusion range settings */ if ( limit >= iommu_top ) reserve_iommu_exclusion_range_all(iommu, base, limit); return 0; } static int __init parse_ivmd_device_select( struct acpi_ivmd_block_header *ivmd_block, unsigned long base, unsigned long limit, u8 iw, u8 ir) { u16 bdf; bdf = ivmd_block->header.dev_id; if ( bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVMD Error: Invalid Dev_Id 0x%x\n", bdf); return -ENODEV; } return register_exclusion_range_for_device(bdf, base, limit, iw, ir); } static int __init parse_ivmd_device_range( struct acpi_ivmd_block_header *ivmd_block, unsigned long base, unsigned long limit, u8 iw, u8 ir) { u16 first_bdf, last_bdf, bdf; int error; first_bdf = ivmd_block->header.dev_id; if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG( "IVMD Error: Invalid Range_First Dev_Id 0x%x\n", first_bdf); return -ENODEV; } last_bdf = ivmd_block->last_dev_id; if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG( "IVMD Error: Invalid Range_Last Dev_Id 0x%x\n", last_bdf); return -ENODEV; } for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ ) error = register_exclusion_range_for_device( bdf, base, limit, iw, ir); return error; } static int __init parse_ivmd_device_iommu( struct acpi_ivmd_block_header *ivmd_block, unsigned long 
base, unsigned long limit, u8 iw, u8 ir) { struct amd_iommu *iommu; /* find target IOMMU */ iommu = find_iommu_from_bdf_cap(ivmd_block->header.dev_id, ivmd_block->cap_offset); if ( !iommu ) { AMD_IOMMU_DEBUG("IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n", ivmd_block->header.dev_id, ivmd_block->cap_offset); return -ENODEV; } return register_exclusion_range_for_iommu_devices( iommu, base, limit, iw, ir); } static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block) { unsigned long start_addr, mem_length, base, limit; u8 iw, ir; if ( ivmd_block->header.length < sizeof(struct acpi_ivmd_block_header) ) { AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Length!\n"); return -ENODEV; } start_addr = (unsigned long)ivmd_block->start_addr; mem_length = (unsigned long)ivmd_block->mem_length; base = start_addr & PAGE_MASK; limit = (start_addr + mem_length - 1) & PAGE_MASK; AMD_IOMMU_DEBUG("IVMD Block: Type 0x%x\n",ivmd_block->header.type); AMD_IOMMU_DEBUG(" Start_Addr_Phys 0x%lx\n", start_addr); AMD_IOMMU_DEBUG(" Mem_Length 0x%lx\n", mem_length); if ( get_field_from_byte(ivmd_block->header.flags, AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK, AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT) ) iw = ir = IOMMU_CONTROL_ENABLED; else if ( get_field_from_byte(ivmd_block->header.flags, AMD_IOMMU_ACPI_UNITY_MAPPING_MASK, AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT) ) { iw = get_field_from_byte(ivmd_block->header.flags, AMD_IOMMU_ACPI_IW_PERMISSION_MASK, AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT); ir = get_field_from_byte(ivmd_block->header.flags, AMD_IOMMU_ACPI_IR_PERMISSION_MASK, AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT); } else { AMD_IOMMU_DEBUG("IVMD Error: Invalid Flag Field!\n"); return -ENODEV; } switch( ivmd_block->header.type ) { case AMD_IOMMU_ACPI_IVMD_ALL_TYPE: return register_exclusion_range_for_all_devices( base, limit, iw, ir); case AMD_IOMMU_ACPI_IVMD_ONE_TYPE: return parse_ivmd_device_select(ivmd_block, base, limit, iw, ir); case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE: return 
parse_ivmd_device_range(ivmd_block, base, limit, iw, ir); case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE: return parse_ivmd_device_iommu(ivmd_block, base, limit, iw, ir); default: AMD_IOMMU_DEBUG("IVMD Error: Invalid Block Type!\n"); return -ENODEV; } } static u16 __init parse_ivhd_device_padding( u16 pad_length, u16 header_length, u16 block_length) { if ( header_length < (block_length + pad_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } return pad_length; } static u16 __init parse_ivhd_device_select( union acpi_ivhd_device *ivhd_device, struct amd_iommu *iommu) { u16 bdf; bdf = ivhd_device->header.dev_id; if ( bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf); return 0; } add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu); return sizeof(struct acpi_ivhd_device_header); } static u16 __init parse_ivhd_device_range( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, first_bdf, last_bdf, bdf; dev_length = sizeof(struct acpi_ivhd_device_range); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } if ( ivhd_device->range.trailer.type != AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END ) { AMD_IOMMU_DEBUG("IVHD Error: " "Invalid Range: End_Type 0x%x\n", ivhd_device->range.trailer.type); return 0; } first_bdf = ivhd_device->header.dev_id; if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf); return 0; } last_bdf = ivhd_device->range.trailer.dev_id; if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf); return 0; } AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf); for ( bdf = first_bdf; bdf <= last_bdf; bdf++ ) add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu); return 
dev_length; } static u16 __init parse_ivhd_device_alias( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, alias_id, bdf; dev_length = sizeof(struct acpi_ivhd_device_alias); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } bdf = ivhd_device->header.dev_id; if ( bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf); return 0; } alias_id = ivhd_device->alias.dev_id; if ( alias_id >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id); return 0; } AMD_IOMMU_DEBUG(" Dev_Id Alias: 0x%x\n", alias_id); add_ivrs_mapping_entry(bdf, alias_id, ivhd_device->header.flags, iommu); return dev_length; } static u16 __init parse_ivhd_device_alias_range( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, first_bdf, last_bdf, alias_id, bdf; dev_length = sizeof(struct acpi_ivhd_device_alias_range); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } if ( ivhd_device->alias_range.trailer.type != AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END ) { AMD_IOMMU_DEBUG("IVHD Error: " "Invalid Range: End_Type 0x%x\n", ivhd_device->alias_range.trailer.type); return 0; } first_bdf = ivhd_device->header.dev_id; if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf); return 0; } last_bdf = ivhd_device->alias_range.trailer.dev_id; if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf); return 0; } alias_id = ivhd_device->alias_range.alias.dev_id; if ( alias_id >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id); return 0; } AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 
0x%x\n", first_bdf, last_bdf); AMD_IOMMU_DEBUG(" Dev_Id Alias: 0x%x\n", alias_id); for ( bdf = first_bdf; bdf <= last_bdf; bdf++ ) add_ivrs_mapping_entry(bdf, alias_id, ivhd_device->header.flags, iommu); return dev_length; } static u16 __init parse_ivhd_device_extended( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, bdf; dev_length = sizeof(struct acpi_ivhd_device_extended); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } bdf = ivhd_device->header.dev_id; if ( bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf); return 0; } add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu); return dev_length; } static u16 __init parse_ivhd_device_extended_range( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, first_bdf, last_bdf, bdf; dev_length = sizeof(struct acpi_ivhd_device_extended_range); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } if ( ivhd_device->extended_range.trailer.type != AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END ) { AMD_IOMMU_DEBUG("IVHD Error: " "Invalid Range: End_Type 0x%x\n", ivhd_device->extended_range.trailer.type); return 0; } first_bdf = ivhd_device->header.dev_id; if ( first_bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf); return 0; } last_bdf = ivhd_device->extended_range.trailer.dev_id; if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) ) { AMD_IOMMU_DEBUG( "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf); return 0; } AMD_IOMMU_DEBUG(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf); for ( bdf = first_bdf; bdf <= last_bdf; bdf++ ) add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu); return dev_length; } 
static u16 __init parse_ivhd_device_special( union acpi_ivhd_device *ivhd_device, u16 header_length, u16 block_length, struct amd_iommu *iommu) { u16 dev_length, bdf; dev_length = sizeof(struct acpi_ivhd_device_special); if ( header_length < (block_length + dev_length) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Length!\n"); return 0; } bdf = ivhd_device->special.dev_id; if ( bdf >= ivrs_bdf_entries ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf); return 0; } add_ivrs_mapping_entry(bdf, bdf, ivhd_device->header.flags, iommu); /* set device id of ioapic */ ioapic_bdf[ivhd_device->special.handle] = bdf; return dev_length; } static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block) { union acpi_ivhd_device *ivhd_device; u16 block_length, dev_length; struct amd_iommu *iommu; if ( ivhd_block->header.length < sizeof(struct acpi_ivhd_block_header) ) { AMD_IOMMU_DEBUG("IVHD Error: Invalid Block Length!\n"); return -ENODEV; } iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id, ivhd_block->cap_offset); if ( !iommu ) { AMD_IOMMU_DEBUG("IVHD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n", ivhd_block->header.dev_id, ivhd_block->cap_offset); return -ENODEV; } /* parse Device Entries */ block_length = sizeof(struct acpi_ivhd_block_header); while ( ivhd_block->header.length >= (block_length + sizeof(struct acpi_ivhd_device_header)) ) { ivhd_device = (union acpi_ivhd_device *) ((u8 *)ivhd_block + block_length); AMD_IOMMU_DEBUG( "IVHD Device Entry:\n"); AMD_IOMMU_DEBUG( " Type 0x%x\n", ivhd_device->header.type); AMD_IOMMU_DEBUG( " Dev_Id 0x%x\n", ivhd_device->header.dev_id); AMD_IOMMU_DEBUG( " Flags 0x%x\n", ivhd_device->header.flags); switch ( ivhd_device->header.type ) { case AMD_IOMMU_ACPI_IVHD_DEV_U32_PAD: dev_length = parse_ivhd_device_padding( sizeof(u32), ivhd_block->header.length, block_length); break; case AMD_IOMMU_ACPI_IVHD_DEV_U64_PAD: dev_length = parse_ivhd_device_padding( sizeof(u64), 
ivhd_block->header.length, block_length); break; case AMD_IOMMU_ACPI_IVHD_DEV_SELECT: dev_length = parse_ivhd_device_select(ivhd_device, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_RANGE_START: dev_length = parse_ivhd_device_range( ivhd_device, ivhd_block->header.length, block_length, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_SELECT: dev_length = parse_ivhd_device_alias( ivhd_device, ivhd_block->header.length, block_length, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_ALIAS_RANGE: dev_length = parse_ivhd_device_alias_range( ivhd_device, ivhd_block->header.length, block_length, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_EXT_SELECT: dev_length = parse_ivhd_device_extended( ivhd_device, ivhd_block->header.length, block_length, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_EXT_RANGE: dev_length = parse_ivhd_device_extended_range( ivhd_device, ivhd_block->header.length, block_length, iommu); break; case AMD_IOMMU_ACPI_IVHD_DEV_SPECIAL: dev_length = parse_ivhd_device_special( ivhd_device, ivhd_block->header.length, block_length, iommu); break; default: AMD_IOMMU_DEBUG("IVHD Error: Invalid Device Type!\n"); dev_length = 0; break; } block_length += dev_length; if ( !dev_length ) return -ENODEV; } return 0; } static int __init parse_ivrs_block(struct acpi_ivrs_block_header *ivrs_block) { struct acpi_ivhd_block_header *ivhd_block; struct acpi_ivmd_block_header *ivmd_block; switch ( ivrs_block->type ) { case AMD_IOMMU_ACPI_IVHD_TYPE: ivhd_block = (struct acpi_ivhd_block_header *)ivrs_block; return parse_ivhd_block(ivhd_block); case AMD_IOMMU_ACPI_IVMD_ALL_TYPE: case AMD_IOMMU_ACPI_IVMD_ONE_TYPE: case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE: case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE: ivmd_block = (struct acpi_ivmd_block_header *)ivrs_block; return parse_ivmd_block(ivmd_block); default: AMD_IOMMU_DEBUG("IVRS Error: Invalid Block Type!\n"); return -ENODEV; } return 0; } static void __init dump_acpi_table_header(struct acpi_table_header *table) { int i; AMD_IOMMU_DEBUG("ACPI Table:\n"); 
AMD_IOMMU_DEBUG(" Signature "); for ( i = 0; i < ACPI_NAME_SIZE; i++ ) printk("%c", table->signature[i]); printk("\n"); AMD_IOMMU_DEBUG(" Length 0x%x\n", table->length); AMD_IOMMU_DEBUG(" Revision 0x%x\n", table->revision); AMD_IOMMU_DEBUG(" CheckSum 0x%x\n", table->checksum); AMD_IOMMU_DEBUG(" OEM_Id "); for ( i = 0; i < ACPI_OEM_ID_SIZE; i++ ) printk("%c", table->oem_id[i]); printk("\n"); AMD_IOMMU_DEBUG(" OEM_Table_Id "); for ( i = 0; i < ACPI_OEM_TABLE_ID_SIZE; i++ ) printk("%c", table->oem_table_id[i]); printk("\n"); AMD_IOMMU_DEBUG(" OEM_Revision 0x%x\n", table->oem_revision); AMD_IOMMU_DEBUG(" Creator_Id "); for ( i = 0; i < ACPI_NAME_SIZE; i++ ) printk("%c", table->asl_compiler_id[i]); printk("\n"); AMD_IOMMU_DEBUG(" Creator_Revision 0x%x\n", table->asl_compiler_revision); } static int __init parse_ivrs_table(struct acpi_table_header *_table) { struct acpi_ivrs_block_header *ivrs_block; unsigned long length; int error = 0; struct acpi_table_header *table = (struct acpi_table_header *)_table; BUG_ON(!table); if ( amd_iommu_debug ) dump_acpi_table_header(table); /* parse IVRS blocks */ length = sizeof(struct acpi_ivrs_table_header); while ( (err
/*
 *  GPIO Button Hotplug driver
 *
 *  Copyright (C) 2012 Felix Fietkau <nbd@openwrt.org>
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *
 *  Based on the diag.c - GPIO interface driver for Broadcom boards
 *    Copyright (C) 2006 Mike Baker <mbm@openwrt.org>,
 *    Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
 *    Copyright (C) 2008 Andy Boyett <agb@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kmod.h>

#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/kobject.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio_keys.h>

#define DRV_NAME	"gpio-keys"

#define BH_SKB_SIZE	2048

#define PFX	DRV_NAME ": "

#undef BH_DEBUG

#ifdef BH_DEBUG
#define BH_DBG(fmt, args...) printk(KERN_DEBUG "%s: " fmt, DRV_NAME, ##args )
#else
#define BH_DBG(fmt, args...) do {} while (0)
#endif

#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, DRV_NAME, ##args )

/* Per-button hotplug bookkeeping. */
struct bh_priv {
	unsigned long		seen;	/* jiffies timestamp of the last reported event */
};

/* One queued hotplug broadcast; allocated in button_hotplug_create_event()
 * and freed at the end of button_hotplug_work(). */
struct bh_event {
	const char		*name;		/* button name from button_map[] (static storage) */
	unsigned int		type;		/* input event type (EV_KEY or EV_SW) */
	char			*action;	/* "pressed" or "released" (string literal) */
	unsigned long		seen;		/* seconds since the previous event for this button */

	struct sk_buff		*skb;		/* uevent payload under construction */
	struct work_struct	work;		/* deferred-broadcast work item */
};

/* Mapping from an input keycode to the BUTTON= name used in uevents. */
struct bh_map {
	unsigned int	code;
	const char	*name;
};

/* Runtime state for one configured button. */
struct gpio_keys_button_data {
	struct delayed_work work;	/* per-button deferred work */
	struct bh_priv bh;		/* timestamp of last reported event */
	int last_state;			/* last stable state; -1 = not yet determined */
	int count;			/* consecutive polls the new state was observed */
	int threshold;			/* polls required before a state change is reported */
	int can_sleep;			/* non-zero if gpio_get_value may sleep for this GPIO */
	struct gpio_keys_button *b;	/* matching platform/DT button description */
};

extern u64 uevent_next_seqnum(void);

#define BH_MAP(_code, _name)		\
	{				\
		.code = (_code),	\
		.name = (_name),	\
	}

/* Keycode -> uevent-name table; events for keycodes not listed here
 * are silently dropped (see button_get_index()). */
static struct bh_map button_map[] = {
	BH_MAP(BTN_0,		"BTN_0"),
	BH_MAP(BTN_1,		"BTN_1"),
	BH_MAP(BTN_2,		"BTN_2"),
	BH_MAP(BTN_3,		"BTN_3"),
	BH_MAP(BTN_4,		"BTN_4"),
	BH_MAP(BTN_5,		"BTN_5"),
	BH_MAP(BTN_6,		"BTN_6"),
	BH_MAP(BTN_7,		"BTN_7"),
	BH_MAP(BTN_8,		"BTN_8"),
	BH_MAP(BTN_9,		"BTN_9"),
	BH_MAP(KEY_POWER,	"power"),
	BH_MAP(KEY_RESTART,	"reset"),
	BH_MAP(KEY_RFKILL,	"rfkill"),
	BH_MAP(KEY_WPS_BUTTON,	"wps"),
	BH_MAP(KEY_WIMAX,	"wwan"),
};

/* -------------------------------------------------------------------------*/

/*
 * Append one NUL-terminated "KEY=value" string to event->skb.
 *
 * @event:  event whose skb receives the variable
 * @argv:   when non-zero the variable is skipped (argv-style call sites)
 * @format: printf-style format for the variable
 *
 * Returns 0 on success, -ENOMEM if formatting failed or the result
 * does not fit in the scratch buffer.
 */
static __printf(3, 4)
int bh_event_add_var(struct bh_event *event, int argv, const char *format, ...)
{
	/* On-stack scratch buffer: the old `static` buffer was shared
	 * mutable state and racy if several work items format at once. */
	char buf[128];
	char *s;
	va_list args;
	int len;

	if (argv)
		return 0;

	va_start(args, format);
	len = vsnprintf(buf, sizeof(buf), format, args);
	va_end(args);

	/* vsnprintf() may return a negative value on output errors;
	 * check it explicitly instead of relying on signed->unsigned
	 * promotion in the size comparison. */
	if (len < 0 || len >= sizeof(buf)) {
		WARN(1, "buffer size too small");
		return -ENOMEM;
	}

	s = skb_put(event->skb, len + 1);
	strcpy(s, buf);

	BH_DBG("added variable '%s'\n", s);

	return 0;
}

/*
 * Populate the uevent environment for a button event.  Variables are
 * emitted in the same order as before (HOME, PATH, SUBSYSTEM, ACTION,
 * BUTTON, optional TYPE for switches, SEEN, SEQNUM); emission stops at
 * the first failure and that error is returned.
 */
static int button_hotplug_fill_event(struct bh_event *event)
{
	int err;

	err = bh_event_add_var(event, 0, "HOME=%s", "/");
	if (!err)
		err = bh_event_add_var(event, 0, "PATH=%s",
				"/sbin:/bin:/usr/sbin:/usr/bin");
	if (!err)
		err = bh_event_add_var(event, 0, "SUBSYSTEM=%s", "button");
	if (!err)
		err = bh_event_add_var(event, 0, "ACTION=%s", event->action);
	if (!err)
		err = bh_event_add_var(event, 0, "BUTTON=%s", event->name);
	if (!err && event->type == EV_SW)
		err = bh_event_add_var(event, 0, "TYPE=%s", "switch");
	if (!err)
		err = bh_event_add_var(event, 0, "SEEN=%ld", event->seen);
	if (!err)
		err = bh_event_add_var(event, 0, "SEQNUM=%llu",
				uevent_next_seqnum());

	return err;
}

/*
 * Workqueue handler: build the uevent payload for one button event and
 * broadcast it, then release the event.  Runs in process context, so
 * GFP_KERNEL allocations and the netlink broadcast are allowed here
 * (but not in the IRQ path that queued us).
 */
static void button_hotplug_work(struct work_struct *work)
{
	struct bh_event *event = container_of(work, struct bh_event, work);
	int ret = 0;

	event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL);
	if (!event->skb)
		goto out_free_event;

	/* uevent header line: "<action>@" */
	ret = bh_event_add_var(event, 0, "%s@", event->action);
	if (ret)
		goto out_free_skb;

	ret = button_hotplug_fill_event(event);
	if (ret)
		goto out_free_skb;

	/* NOTE(review): dst_group/broadcast group 1 appears to be the
	 * kobject uevent multicast group — confirm against broadcast_uevent() */
	NETLINK_CB(event->skb).dst_group = 1;
	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);

 out_free_skb:
	/* only free the skb on the error paths; on success it was
	 * handed off to broadcast_uevent() above */
	if (ret) {
		BH_ERR("work error %d\n", ret);
		kfree_skb(event->skb);
	}
 out_free_event:
	kfree(event);
}

/*
 * Allocate a bh_event describing one button press/release and hand it
 * to the system workqueue; the actual netlink broadcast happens later
 * in button_hotplug_work(), which may sleep.
 *
 * @name:    symbolic button name from button_map[] (not copied)
 * @type:    input event type (EV_KEY or EV_SW)
 * @seen:    seconds since this button's previous event
 * @pressed: non-zero for "pressed", zero for "released"
 *
 * Returns 0 on success, -ENOMEM if the event cannot be allocated.
 */
static int button_hotplug_create_event(const char *name, unsigned int type,
		unsigned long seen, int pressed)
{
	struct bh_event *event;

	BH_DBG("create event, name=%s, seen=%lu, pressed=%d\n",
		name, seen, pressed);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->name = name;
	event->type = type;
	event->seen = seen;
	event->action = pressed ? "pressed" : "released";

	/* button_hotplug_work() already matches work_func_t; the former
	 * (void *)(void *) double cast only defeated type checking. */
	INIT_WORK(&event->work, button_hotplug_work);
	schedule_work(&event->work);

	return 0;
}

/* -------------------------------------------------------------------------*/

static int button_get_index(unsigned int code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(button_map); i++)
		if (button_map[i].code == code)
			return i;

	return -1;
}

/* Translate a debounced input event into a hotplug broadcast; unknown
 * keycodes and non-key/switch event types are ignored. */
static void button_hotplug_event(struct gpio_keys_button_data *data,
			   unsigned int type, int value)
{
	struct bh_priv *priv = &data->bh;
	unsigned long now = jiffies;
	int idx;

	BH_DBG("event type=%u, code=%u, value=%d\n", type, data->b->code, value);

	if (type != EV_KEY && type != EV_SW)
		return;

	idx = button_get_index(data->b->code);
	if (idx < 0)
		return;

	button_hotplug_create_event(button_map[idx].name, type,
			(now - priv->seen) / HZ, value);
	priv->seen = now;
}

/* Per-platform-device state: shared bookkeeping followed by one
 * gpio_keys_button_data per configured button.  Allocated as a single
 * chunk (sizeof(struct) + nbuttons * sizeof(data[0])). */
struct gpio_keys_button_dev {
	int polled;			/* non-zero: poll GPIOs instead of using IRQs */
	struct delayed_work work;	/* polling work (polled mode only) */

	struct device *dev;
	struct gpio_keys_platform_data *pdata;
	struct gpio_keys_button_data data[];	/* C99 flexible array member (was GNU [0]) */
};

/* Read the current GPIO level, normalised for active_low wiring, using
 * the sleeping accessor when the GPIO controller requires it. */
static int gpio_button_get_value(struct gpio_keys_button_data *bdata)
{
	int raw = bdata->can_sleep ?
		!!gpio_get_value_cansleep(bdata->b->gpio) :
		!!gpio_get_value(bdata->b->gpio);

	return raw ^ bdata->b->active_low;
}

/*
 * Debounce and report one polled button.
 *
 * A state change is only reported after it has been observed on
 * `threshold` consecutive polls; the early return keeps `count`
 * accumulating while the change is still unconfirmed, and `count`
 * resets to 0 once the state is stable (changed-and-reported, or
 * unchanged).
 */
static void gpio_keys_polled_check_state(struct gpio_keys_button_data *bdata)
{
	int state = gpio_button_get_value(bdata);

	if (state != bdata->last_state) {
		unsigned int type = bdata->b->type ?: EV_KEY;

		if (bdata->count < bdata->threshold) {
			bdata->count++;
			return;
		}

		/* suppress the very first EV_KEY reading (last_state == -1),
		 * but always report the initial position of a switch */
		if ((bdata->last_state != -1) || (type == EV_SW))
			button_hotplug_event(bdata, type, state);

		bdata->last_state = state;
	}

	bdata->count = 0;
}

/* (Re)arm the polling work after pdata->poll_interval milliseconds. */
static void gpio_keys_polled_queue_work(struct gpio_keys_button_dev *bdev)
{
	unsigned long delay =
		msecs_to_jiffies(bdev->pdata->poll_interval);

	/* batch long timers onto jiffy boundaries to reduce wakeups */
	if (delay >= HZ)
		delay = round_jiffies_relative(delay);

	schedule_delayed_work(&bdev->work, delay);
}

/* Polling work handler: debounce every button, then rearm the timer. */
static void gpio_keys_polled_poll(struct work_struct *work)
{
	struct gpio_keys_button_dev *bdev =
		container_of(work, struct gpio_keys_button_dev, work.work);
	int nbuttons = bdev->pdata->nbuttons;
	int i;

	for (i = 0; i < nbuttons; i++)
		gpio_keys_polled_check_state(&bdev->data[i]);

	gpio_keys_polled_queue_work(bdev);
}

/* Stop polling and give the platform a chance to power things down. */
static void gpio_keys_polled_close(struct gpio_keys_button_dev *bdev)
{
	cancel_delayed_work_sync(&bdev->work);

	if (bdev->pdata->disable)
		bdev->pdata->disable(bdev->dev);
}

/* GPIO interrupt handler: report the button's current level directly. */
static irqreturn_t button_handle_irq(int irq, void *_bdata)
{
	struct gpio_keys_button_data *bdata = _bdata;
	unsigned int type = bdata->b->type ?: EV_KEY;

	button_hotplug_event(bdata, type, gpio_button_get_value(bdata));

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/*
 * Build gpio_keys_platform_data from the device tree.
 *
 * Returns the populated pdata on success, NULL when the device has no
 * DT node or no children, or ERR_PTR(-errno) on failure.  All memory
 * is devm-managed.  Children without a "gpios" property are skipped
 * (with a warning) rather than treated as errors.
 */
static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
	struct device_node *node, *pp;
	struct gpio_keys_platform_data *pdata;
	struct gpio_keys_button *button;
	int error;
	int nbuttons;
	int i = 0;

	node = dev->of_node;
	if (!node)
		return NULL;

	nbuttons = of_get_child_count(node);
	if (nbuttons == 0)
		return NULL;

	/* single allocation: pdata immediately followed by the button array */
	pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * (sizeof *button),
		GFP_KERNEL);
	if (!pdata) {
		error = -ENOMEM;
		goto err_out;
	}

	pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
	pdata->nbuttons = nbuttons;

	pdata->rep = !!of_get_property(node, "autorepeat", NULL);
	of_property_read_u32(node, "poll-interval", &pdata->poll_interval);

	for_each_child_of_node(node, pp) {
		enum of_gpio_flags flags;

		if (!of_find_property(pp, "gpios", NULL)) {
			pdata->nbuttons--;
			dev_warn(dev, "Found button without gpios\n");
			continue;
		}

		button = &pdata->buttons[i++];

		button->gpio = of_get_gpio_flags(pp, 0, &flags);
		if (button->gpio < 0) {
			error = button->gpio;
			if (error != -ENOENT) {
				if (error != -EPROBE_DEFER)
					dev_err(dev,
						"Failed to get gpio flags, error: %d\n",
						error);
				/* for_each_child_of_node() holds a reference
				 * on pp; drop it when leaving the loop early */
				of_node_put(pp);
				return ERR_PTR(error);
			}
		} else {
			button->active_low = flags & OF_GPIO_ACTIVE_LOW;
		}

		if (of_property_read_u32(pp, "linux,code", &button->code)) {
			dev_err(dev, "Button without keycode: 0x%x\n",
				button->gpio);
			error = -EINVAL;
			of_node_put(pp);	/* early loop exit, see above */
			goto err_out;
		}

		button->desc = of_get_property(pp, "label", NULL);

		if (of_property_read_u32(pp, "linux,input-type", &button->type))
			button->type = EV_KEY;

		button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);

		if (of_property_read_u32(pp, "debounce-interval",
					&button->debounce_interval))
			button->debounce_interval = 5;
	}

	if (pdata->nbuttons == 0) {
		error = -EINVAL;
		goto err_out;
	}

	return pdata;

err_out:
	return ERR_PTR(error);
}

/* OF match table for the interrupt-driven variant; const, as the
 * driver core only ever reads it. */
static const struct of_device_id gpio_keys_of_match[] = {
	{ .compatible = "gpio-keys", },
	{ },
};
MODULE_DEVICE_TABLE(of, gpio_keys_of_match);

/* OF match table for the polled variant; const, as the driver core
 * only ever reads it. */
static const struct of_device_id gpio_keys_polled_of_match[] = {
	{ .compatible = "gpio-keys-polled", },
	{ },
};
MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);

#else

/* !CONFIG_OF stub: no device tree support, so the caller must supply
 * platform data itself (probe treats NULL as "missing platform data"). */
static inline struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
	return NULL;
}
#endif

static int gpio_keys_button_probe(struct platform_device *pdev,
		struct gpio_keys_button_dev **_bdev, int polled)
{
	struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct gpio_keys_button_dev *bdev;
	struct gpio_keys_button *buttons;
	int error;
	int i;

	if (!pdata) {
		pdata = gpio_keys_get_devtree_pdata(dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		if (!pdata) {
			dev_err(dev, "missing platform data\n");
			return -EINVAL;
		}
		pdev->dev.platform_data = pdata;
	}

	if (polled && !pdata->poll_interval) {
		dev_err(dev, "missing poll_interval value\n");
		return -EINVAL;
	}

	buttons = devm_kzalloc(dev, pdata->nbuttons * sizeof(struct gpio_keys_button),
		       GFP_KERNEL);
	if (!buttons) {
		dev_err(dev, "no memory for button data\n");
		return -ENOMEM;
	}
	memcpy(buttons, pdata->buttons, pdata->nbuttons * sizeof(struct gpio_keys_button));

	bdev = devm_kzalloc(dev, sizeof(struct gpio_keys_button_dev) +
		       pdata->nbuttons * sizeof(struct gpio_keys_button_data),
		       GFP_KERNEL);
	if (!bdev) {
		dev_err(dev, "no memory for private data\n");
		return -ENOMEM;
	}

	bdev->polled = polled;

	for (i = 0; i < pdata->nbuttons; i++) {
		struct gpio_keys_button *button = &buttons[i];
		struct gpio_keys_button_data *bdata = &bdev->data[i];
		unsigned int gpio = button->gpio;

		if (button->wakeup) {
			dev_err(dev, DRV_NAME "does not support wakeup\n");
			return -EINVAL;
		}

		error = devm_gpio_request(dev, gpio,
				     button->desc ? button->desc : DRV_NAME);
		if (error) {
			dev_err(dev, "unable to claim gpio %u, err=%d\n",
				gpio, error);
			return error;
		}

		error = gpio_direction_input(gpio);
		if (error) {
			dev_err(dev,
				"unable to set direction on gpio %u, err=%d\n",
				gpio, error);
			return error;
		}

		bdata->can_sleep = gpio_cansleep(gpio);
		bdata->last_state = -1;

		if (bdev->polled)
			bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
						pdata->poll_interval);
		else
			bdata->threshold = 1;

		bdata->b = &pdata->buttons[i];
	}

	bdev->dev = &pdev->dev;
	bdev->pdata = pdata;
	platform_set_drvdata(pdev, bdev);

	*_bdev = bdev;

	return 0;
}

/*
 * Probe for the interrupt-driven "gpio-keys" flavour.
 *
 * Runs the common button setup, then requests an edge-triggered IRQ for
 * every button whose GPIO cannot sleep.  Buttons whose GPIO can sleep or
 * whose IRQ cannot be obtained are skipped (logged, not fatal), matching
 * the original best-effort behaviour.
 *
 * Returns 0 on success or the error from gpio_keys_button_probe().
 */
static int gpio_keys_probe(struct platform_device *pdev)
{
	struct gpio_keys_platform_data *pdata;
	struct gpio_keys_button_dev *bdev;
	int ret, i;

	ret = gpio_keys_button_probe(pdev, &bdev, 0);
	if (ret)
		return ret;

	pdata = pdev->dev.platform_data;
	for (i = 0; i < pdata->nbuttons; i++) {
		struct gpio_keys_button *button = &pdata->buttons[i];
		struct gpio_keys_button_data *bdata = &bdev->data[i];

		if (bdata->can_sleep) {
			dev_err(&pdev->dev, "skipping gpio:%d, it can sleep\n", button->gpio);
			continue;
		}

		if (!button->irq) {
			/*
			 * Capture gpio_to_irq()'s return in a signed local
			 * before storing it: if button->irq is an unsigned
			 * field, the original "button->irq < 0" test could
			 * never detect a mapping failure.
			 */
			int irq = gpio_to_irq(button->gpio);

			if (irq < 0) {
				dev_err(&pdev->dev, "failed to get irq for gpio:%d\n", button->gpio);
				continue;
			}
			button->irq = irq;
		}

		ret = devm_request_irq(&pdev->dev, button->irq, button_handle_irq,
					IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					dev_name(&pdev->dev), bdata);
		if (ret)
			dev_err(&pdev->dev, "failed to request irq:%d for gpio:%d\n", button->irq, button->gpio);
		else
			dev_dbg(&pdev->dev, "gpio:%d has irq:%d\n", button->gpio, button->irq);

		/* Switches report their initial state immediately. */
		if (bdata->b->type == EV_SW)
			button_hotplug_event(bdata, EV_SW, gpio_button_get_value(bdata));
	}

	return 0;
}

static int gpio_keys_polled_probe(struct platform_device *pdev)
{
	struct gpio_keys_platform_data *pdata;
	struct gpio_keys_button_dev *bdev;
	int ret;
	int i;

	ret = gpio_keys_button_probe(pdev, &bdev, 1);

	if (ret)
		return ret;

	INIT_DELAYED_WORK(&bdev->work, gpio_keys_polled_poll);

	pdata = bdev->pdata;

	if (pdata->enable)
		pdata->enable(bdev->dev);

	for (i = 0; i < pdata->nbuttons; i++)
		gpio_keys_polled_check_state(&bdev->data[i]);

	gpio_keys_polled_queue_work(bdev);

	return ret;
}

/*
 * Remove callback shared by both platform drivers.
 *
 * Stops the poll work for the polled flavour and clears the driver data
 * pointer; everything else (memory, GPIOs, IRQs) is devm-managed and is
 * released automatically by the driver core.
 */
static int gpio_keys_remove(struct platform_device *pdev)
{
	struct gpio_keys_button_dev *bdev = platform_get_drvdata(pdev);

	if (bdev->polled)
		gpio_keys_polled_close(bdev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

/* Platform driver for the interrupt-driven flavour ("gpio-keys"). */
static struct platform_driver gpio_keys_driver = {
	.probe	= gpio_keys_probe,
	.remove	= gpio_keys_remove,
	.driver	= {
		.name	= "gpio-keys",
		.owner	= THIS_MODULE,
		/* of_match_ptr() compiles to NULL when CONFIG_OF is unset. */
		.of_match_table = of_match_ptr(gpio_keys_of_match),
	},
};

/* Platform driver for the polled flavour ("gpio-keys-polled"). */
static struct platform_driver gpio_keys_polled_driver = {
	.probe	= gpio_keys_polled_probe,
	.remove	= gpio_keys_remove,
	.driver	= {
		.name	= "gpio-keys-polled",
		.owner	= THIS_MODULE,
		/* of_match_ptr() compiles to NULL when CONFIG_OF is unset. */
		.of_match_table = of_match_ptr(gpio_keys_polled_of_match),
	},
};

/*
 * Module init: register both platform drivers.  If the second
 * registration fails, the first is rolled back so the module loads
 * all-or-nothing.
 */
static int __init gpio_button_init(void)
{
	int err = platform_driver_register(&gpio_keys_driver);

	if (err)
		return err;

	err = platform_driver_register(&gpio_keys_polled_driver);
	if (err)
		platform_driver_unregister(&gpio_keys_driver);

	return err;
}

/*
 * Module exit: unregister both drivers, in reverse order of their
 * registration in gpio_button_init() (polled driver was registered
 * last, so it is torn down first — standard init/exit symmetry).
 */
static void __exit gpio_button_exit(void)
{
	platform_driver_unregister(&gpio_keys_polled_driver);
	platform_driver_unregister(&gpio_keys_driver);
}

module_init(gpio_button_init);
module_exit(gpio_button_exit);

/* Module metadata. */
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_DESCRIPTION("Polled GPIO Buttons hotplug driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);