#include #include #include #include #include #include #include #include #include #include #include /* opt_mem: Limit of physical RAM. Any RAM beyond this point is ignored. */ static unsigned long long __initdata opt_mem; size_param("mem", opt_mem); /* opt_nomtrr_check: Don't clip ram to highest cacheable MTRR. */ static int __initdata e820_mtrr_clip = -1; boolean_param("e820-mtrr-clip", e820_mtrr_clip); /* opt_e820_verbose: Be verbose about clipping, the original e820, &c */ static int __initdata e820_verbose; boolean_param("e820-verbose", e820_verbose); struct e820map e820; /* * This function checks if the entire range is mapped with type. * * Note: this function only works correct if the e820 table is sorted and * not-overlapping, which is the case */ int __init e820_all_mapped(u64 start, u64 end, unsigned type) { int i; for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; if (type && ei->type != type) continue; /* is the region (part) in overlap with the current region ?*/ if (ei->addr >= end || ei->addr + ei->size <= start) continue; /* if the region is at the beginning of we move * start to the end of the region since it's ok until there */ if (ei->addr <= start) start = ei->addr + ei->size; /* * if start is now at or beyond end, we're done, full * coverage */ if (start >= end) return 1; } return 0; } static void __init add_memory_region(unsigned long long start, unsigned long long size, int type) { int x; /*if (!efi_enabled)*/ { x = e820.nr_map; if (x == E820MAX) { printk(KERN_ERR "Ooops! 
Too many entries in the memory map!\n"); return; } e820.map[x].addr = start; e820.map[x].size = size; e820.map[x].type = type; e820.nr_map++; } } /* add_memory_region */ static void __init print_e820_memory_map(struct e820entry *map, int entries) { int i; for (i = 0; i < entries; i++) { printk(" %016Lx - %016Lx ", (unsigned long long)(map[i].addr), (unsigned long long)(map[i].addr + map[i].size)); switch (map[i].type) { case E820_RAM: printk("(usable)\n"); break; case E820_RESERVED: printk("(reserved)\n"); break; case E820_ACPI: printk("(ACPI data)\n"); break; case E820_NVS: printk("(ACPI NVS)\n"); break; case E820_UNUSABLE: printk("(unusable)\n"); break; default: printk("type %u\n", map[i].type); break; } } } /* * Sanitize the BIOS e820 map. * * Some e820 responses include overlapping entries. The following * replaces the original e820 map with a new one, removing overlaps. * */ struct change_member { struct e820entry *pbios; /* pointer to original bios entry */ unsigned long long addr; /* address for this change point */ }; static struct change_member change_point_list[2*E820MAX] __initdata; static struct change_member *change_point[2*E820MAX] __initdata; static struct e820entry *overlap_list[E820MAX] __initdata; static struct e820entry new_bios[E820MAX] __initdata; static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) { struct change_member *change_tmp; unsigned long current_type, last_type; unsigned long long last_addr; int chgidx, still_changing; int overlap_entries; int new_bios_entry; int old_nr, new_nr, chg_nr; int i; /* Visually we're performing the following (1,2,3,4 = memory types)... Sample memory map (w/overlaps): ____22__________________ ______________________4_ ____1111________________ _44_____________________ 11111111________________ ____________________33__ ___________44___________ __________33333_________ ______________22________ ___________________2222_ _________111111111______ _____________________11_ ___________
// dear imgui: Renderer for Metal
// This needs to be used along with a Platform Binding (e.g. OSX)

// Implemented features:
//  [X] Renderer: User texture binding. Use 'MTLTexture' as ImTextureID. Read the FAQ about ImTextureID in imgui.cpp.

// You can copy and use unmodified imgui_impl_* files in your project. See main.cpp for an example of using this.
// If you are new to dear imgui, read examples/README.txt and read the documentation at the top of imgui.cpp.
// https://github.com/ocornut/imgui

// Forward declarations so the header compiles without importing Metal.framework.
@class MTLRenderPassDescriptor;
@protocol MTLDevice, MTLCommandBuffer, MTLRenderCommandEncoder;

// Backend lifecycle: call Init once at startup, NewFrame once per frame before
// building UI, and RenderDrawData after ImGui::Render() to submit draw calls
// into the given command buffer/encoder.
IMGUI_IMPL_API bool ImGui_ImplMetal_Init(id<MTLDevice> device);
IMGUI_IMPL_API void ImGui_ImplMetal_Shutdown();
IMGUI_IMPL_API void ImGui_ImplMetal_NewFrame(MTLRenderPassDescriptor *renderPassDescriptor);
IMGUI_IMPL_API void ImGui_ImplMetal_RenderDrawData(ImDrawData* draw_data,
                                                   id<MTLCommandBuffer> commandBuffer,
                                                   id<MTLRenderCommandEncoder> commandEncoder);

// Called by Init/NewFrame/Shutdown
// Exposed so applications can explicitly (re)create or destroy the backend's
// GPU resources (e.g. around device loss) -- normally not called directly.
IMGUI_IMPL_API bool ImGui_ImplMetal_CreateFontsTexture(id<MTLDevice> device);
IMGUI_IMPL_API void ImGui_ImplMetal_DestroyFontsTexture();
IMGUI_IMPL_API bool ImGui_ImplMetal_CreateDeviceObjects(id<MTLDevice> device);
IMGUI_IMPL_API void ImGui_ImplMetal_DestroyDeviceObjects();
sMask(reg) (0x200 + 2 * (reg) + 1) static uint64_t mtrr_top_of_ram(void) { uint32_t eax, ebx, ecx, edx; uint64_t mtrr_cap, mtrr_def, addr_mask, base, mask, top; unsigned int i, phys_bits = 36; /* By default we check only Intel systems. */ if ( e820_mtrr_clip == -1 ) { char vendor[13]; cpuid(0x00000000, &eax, (uint32_t *)&vendor[0], (uint32_t *)&vendor[8], (uint32_t *)&vendor[4]); vendor[12] = '\0'; e820_mtrr_clip = !strcmp(vendor, "GenuineIntel"); } if ( !e820_mtrr_clip ) return 0; if ( e820_verbose ) printk("Checking MTRR ranges...\n"); /* Does the CPU support architectural MTRRs? */ cpuid(0x00000001, &eax, &ebx, &ecx, &edx); if ( !test_bit(X86_FEATURE_MTRR & 31, &edx) ) return 0; /* Find the physical address size for this CPU. */ cpuid(0x80000000, &eax, &ebx, &ecx, &edx); if ( eax >= 0x80000008 ) { cpuid(0x80000008, &eax, &ebx, &ecx, &edx); phys_bits = (uint8_t)eax; } addr_mask = ((1ull << phys_bits) - 1) & ~((1ull << 12) - 1); rdmsrl(MSR_MTRRcap, mtrr_cap); rdmsrl(MSR_MTRRdefType, mtrr_def); if ( e820_verbose ) printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); /* MTRRs enabled, and default memory type is not writeback? */ if ( !test_bit(11, &mtrr_def) || ((uint8_t)mtrr_def == MTRR_TYPE_WRBACK) ) return 0; /* * Find end of highest WB-type range. This is a conservative estimate * of the highest WB address since overlapping UC/WT ranges dominate. 
*/ top = 0; for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) { rdmsrl(MSR_MTRRphysBase(i), base); rdmsrl(MSR_MTRRphysMask(i), mask); if ( e820_verbose ) printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", i, base, mask); if ( !test_bit(11, &mask) || ((uint8_t)base != MTRR_TYPE_WRBACK) ) continue; base &= addr_mask; mask &= addr_mask; top = max_t(uint64_t, top, ((base | ~mask) & addr_mask) + PAGE_SIZE); } return top; } static void __init reserve_dmi_region(void) { u32 base, len; if ( (dmi_get_table(&base, &len) == 0) && ((base + len) > base) && reserve_e820_ram(&e820, base, base + len) ) printk("WARNING: DMI table located in E820 RAM %08x-%08x. Fixed.\n", base, base+len); } static void __init machine_specific_memory_setup( struct e820entry *raw, int *raw_nr) { uint64_t top_of_ram; char nr = (char)*raw_nr; sanitize_e820_map(raw, &nr); *raw_nr = nr; (void)copy_e820_map(raw, nr); if ( opt_mem ) clip_to_limit(opt_mem, NULL); #ifdef __i386__ clip_to_limit((1ULL << 30) * MACHPHYS_MBYTES, "Only the first %lu GB of the physical memory map " "can be accessed by Xen in 32-bit mode."); #else { unsigned long mpt_limit, ro_mpt_limit; mpt_limit = ((RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) / sizeof(unsigned long)) << PAGE_SHIFT; ro_mpt_limit = ((RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(unsigned long)) << PAGE_SHIFT; if ( mpt_limit > ro_mpt_limit ) mpt_limit = ro_mpt_limit; clip_to_limit(mpt_limit, "Only the first %lu GB of the physical " "memory map can be accessed by Xen."); } #endif reserve_dmi_region(); top_of_ram = mtrr_top_of_ram(); if ( top_of_ram ) clip_to_limit(top_of_ram, "MTRRs do not cover all of memory."); } int __init e820_change_range_type( struct e820map *e820, uint64_t s, uint64_t e, uint32_t orig_type, uint32_t new_type) { uint64_t rs = 0, re = 0; int i; for ( i = 0; i < e820->nr_map; i++ ) { /* Have we found the e820 region that includes the specified range? 
*/ rs = e820->map[i].addr; re = rs + e820->map[i].size; if ( (s >= rs) && (e <= re) ) break; } if ( (i == e820->nr_map) || (e820->map[i].type != orig_type) ) return 0; if ( (s == rs) && (e == re) ) { e820->map[i].type = new_type; } else if ( (s == rs) || (e == re) ) { if ( (e820->nr_map + 1) > ARRAY_SIZE(e820->map) ) goto overflow; memmove(&e820->map[i+1], &e820->map[i], (e820->nr_map-i) * sizeof(e820->map[0])); e820->nr_map++; if ( s == rs ) { e820->map[i].size = e - s; e820->map[i].type = new_type; e820->map[i+1].addr = e; e820->map[i+1].size = re - e; } else { e820->map[i].size = s - rs; e820->map[i+1].addr = s; e820->map[i+1].size = e - s; e820->map[i+1].type = new_type; } } else { if ( (e820->nr_map + 2) > ARRAY_SIZE(e820->map) ) goto overflow; memmove(&e820->map[i+2], &e820->map[i], (e820->nr_map-i) * sizeof(e820->map[0])); e820->nr_map += 2; e820->map[i].size = s - rs; e820->map[i+1].addr = s; e820->map[i+1].size = e - s; e820->map[i+1].type = new_type; e820->map[i+2].addr = e; e820->map[i+2].size = re - e; } /* Finally, look for any opportunities to merge adjacent e820 entries. */ for ( i = 0; i < (e820->nr_map - 1); i++ ) { if ( (e820->map[i].type != e820->map[i+1].type) || ((e820->map[i].addr + e820->map[i].size) != e820->map[i+1].addr) ) continue; e820->map[i].size += e820->map[i+1].size; memmove(&e820->map[i+1], &e820->map[i+2], (e820->nr_map-i-2) * sizeof(e820->map[0])); e820->nr_map--; i--; } return 1; overflow: printk("Overflow in e820 while reserving region %"PRIx64"-%"PRIx64"\n", s, e); return 0; } /* Set E820_RAM area (@s,@e) as RESERVED in specified e820 map. 
*/
int __init reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e)
{
    return e820_change_range_type(e820, s, e, E820_RAM, E820_RESERVED);
}

/*
 * Entry point for boot-time e820 setup: optionally dump the raw map, run the
 * machine-specific fixups into the global e820 map, print the result, and
 * return the highest page frame number found.
 */
unsigned long __init init_e820(
    const char *str, struct e820entry *raw, int *raw_nr)
{
    if ( e820_verbose )
    {
        printk("Initial %s RAM map:\n", str);
        print_e820_memory_map(raw, *raw_nr);
    }

    machine_specific_memory_setup(raw, raw_nr);

    printk("%s RAM map:\n", str);
    print_e820_memory_map(e820.map, e820.nr_map);

    return find_max_pfn();
}