path: root/toolchain/binutils/patches
Commit message | Author | Date | Files | Lines
* toolchain: change default emulation for mips64 (octeon) to n64 | Felix Fietkau | 2015-05-10 | 1 | -0/+37
* binutils: remove version 2.20.1 (only used by avr32) | Felix Fietkau | 2015-03-24 | 8 | -31182/+0
* binutils: remove old versions | Felix Fietkau | 2014-10-05 | 11 | -252/+0
* toolchain/binutils: remove obsolete patches | Felix Fietkau | 2014-06-06 | 8 | -246/+0
* upgrade the Linaro binutils to 2.24-2013.12 | Imre Kaloz | 2014-01-07 | 6 | -19/+9
* add support for Linaro binutils 2.23.2-2013.06 | Imre Kaloz | 2013-09-11 | 6 | -0/+127
* binutils: stop generating the useless _DYNAMIC_LINKING symbol on mips | Felix Fietkau | 2013-06-30 | 2 | -0/+36
* binutils: remove some unused obsolete versions | Felix Fietkau | 2013-06-30 | 16 | -62140/+0
* toolchain/binutils: add musl libc config.sub patch to 2.23.1 | Florian Fainelli | 2013-02-26 | 1 | -0/+10
* toolchain/binutils: refresh patches | Florian Fainelli | 2013-02-26 | 13 | -201/+107
* remove support for ubicom32 | Florian Fainelli | 2012-11-30 | 1 | -48949/+0
* add preliminary support for musl | Florian Fainelli | 2012-11-23 | 1 | -0/+19
* binutils: remove 2.21 | Florian Fainelli | 2012-11-19 | 8 | -31240/+0
* binutils: add support for 2.23.1 | Florian Fainelli | 2012-11-19 | 4 | -0/+99
* binutils/2.22: backport an upstream fix for an assertion in the ARM backend. | Florian Fainelli | 2012-11-19 | 1 | -0/+28
* binutils: backport ld patch to fix ARM specific issues with --gc-sections | Felix Fietkau | 2012-09-19 | 1 | -0/+12
* add binutils 2.22 | Imre Kaloz | 2012-02-08 | 5 | -0/+173
* binutils: add binutils 2.21.1 | Hauke Mehrtens | 2011-08-06 | 8 | -0/+31240
* add the latest avr32 toolchain patches (with additional fixes) | Imre Kaloz | 2011-07-01 | 1 | -2/+2
* remove the CS variant - broken since it's addition | Imre Kaloz | 2011-02-21 | 7 | -32518/+0
* add some more avr32 fixes | Imre Kaloz | 2011-02-18 | 1 | -2/+122
* add binutils 2.21 | Imre Kaloz | 2011-02-18 | 8 | -0/+31120
* add avr32 support | Imre Kaloz | 2011-02-16 | 1 | -0/+30797
* fixup the ubicom patch, refresh | Imre Kaloz | 2011-02-16 | 4 | -82/+61
* refresh patches | Imre Kaloz | 2011-02-16 | 3 | -8/+5
* binutils: backport a fix for broken relocation entries on mips with -fPIE, fi... | Felix Fietkau | 2011-01-28 | 1 | -0/+15
* binutils: Fix autoreconf for packages/devel/binutils package, which uses binu... | Daniel Dickinson | 2010-12-12 | 1 | -0/+84
* remove support for binutils 2.18 and 2.20 | Imre Kaloz | 2010-10-28 | 16 | -53346/+0
* toolchain/binutils: building gas for arm-*-uclibc systems requires linking wi... | Gabor Juhos | 2010-03-30 | 5 | -0/+190
* add binutils-2.20.1 | Alexandros C. Couloumbis | 2010-03-11 | 6 | -0/+326
* add avr32 support for binutils 2.19.1 based on the Atmel 1.2.0 toolchain release | Imre Kaloz | 2010-02-16 | 1 | -0/+30629
* fixup the hunks in patch | Imre Kaloz | 2010-02-15 | 1 | -5/+5
* binutils: remove unrelated hunk in 2.18 avr32 support patch | Nicolas Thill | 2010-01-19 | 1 | -9/+0
* binutils: fix build errors in 2.20 (patch from #6094) | Felix Fietkau | 2009-11-05 | 1 | -2/+9
* add binutils 2.20 | Felix Fietkau | 2009-11-02 | 6 | -0/+319
* binutils: add support for non-pic handling and use it in the codesourcery bas... | Felix Fietkau | 2009-10-19 | 1 | -0/+202
* add a binutils variant based on a binutils snapshot and codesourcery g++ enha... | Felix Fietkau | 2009-10-19 | 6 | -0/+32480
* add a heavily cleaned up version of ubicom32 toolchain support | Felix Fietkau | 2009-08-21 | 1 | -0/+48967
* binutils: fix build failure on powerpc target | Gabor Juhos | 2009-07-14 | 1 | -0/+20
* remove binutils 2.17 and 2.19 | Felix Fietkau | 2009-05-05 | 16 | -111564/+0
* fix a small bug in binutils 2.19.1 * http://sourceware.org/bugzilla/show_bug.... | Gabor Juhos | 2009-05-01 | 1 | -0/+13
* binutils: refresh patches for binutils 2.17 | Hauke Mehrtens | 2009-02-21 | 12 | -483/+318
* binutils: add support for v2.19.1 (from #4492, thanks to dwrobel) | Nicolas Thill | 2009-02-03 | 4 | -0/+104
* add binutils 2.19 (patch from #4367) | Felix Fietkau | 2009-01-17 | 4 | -0/+111
* nuke binutils 2.16.1 - old, unused, buggy | Imre Kaloz | 2008-12-02 | 7 | -1051/+0
* clean up patch | Imre Kaloz | 2008-12-02 | 1 | -461/+0
* sync avr32 binutils patch with the Atmel 1.0.1 release | Imre Kaloz | 2008-12-02 | 1 | -509/+283
* add avr32 support to binutils 2.18 | Imre Kaloz | 2008-11-25 | 1 | -0/+53416
* finally fixes etrax toolchain problems, thanks nbd | John Crispin | 2008-06-06 | 1 | -0/+17
* fix avr32 compiling | Travis Kemen | 2008-05-16 | 1 | -0/+20
   can be lowered to increase the number of domains.  */
#define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)

/* Maximum number of RID bits.  This is definitely 24.  */
#define IA64_MAX_IMPL_RID_BITS	24

/* Maximum number of blocks.  */
#define MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS - IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains.  */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr(unsigned long rr)
{
	unsigned long r;
	__asm__ __volatile__ (";;mov %0=rr[%1];;" : "=r"(r) : "r"(rr) : "memory");
	return r;
}

static inline void
ia64_set_rr(unsigned long rr, unsigned long rrv)
{
	__asm__ __volatile__ (";;mov rr[%0]=%1;;" :: "r"(rr), "r"(rrv) : "memory");
}
#endif

static unsigned long
allocate_metaphysical_rr(struct domain *d, int n)
{
	ia64_rr rrv;

	rrv.rrval = 0;	// or else may see reserved bit fault
	rrv.rid = d->arch.starting_mp_rid + n;
	rrv.ps = PAGE_SHIFT;
	rrv.ve = 0;
	/* Mangle metaphysical rid */
	rrv.rrval = vmMangleRID(rrv.rrval);
	return rrv.rrval;
}

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void init_rid_allocator(void)
{
	int log_blocks;
	pal_vm_info_2_u_t vm_info_2;

	/* Get machine rid_size.  */
	BUG_ON(ia64_pal_vm_summary(NULL, &vm_info_2) != 0);
	implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

	/* We need at least some space...  */
	BUG_ON(implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

	/* ...and we cap how much space we accept.  */
	if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
		implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

	/* Due to RID mangling, we expect 24 RID bits!
	   This test should be removed if RID mangling is removed/modified.  */
	if (implemented_rid_bits != 24) {
		printf("RID mangling expected 24 RID bits, got only %d!\n",
		       implemented_rid_bits);
		BUG();
	}

	/* Allow the creation of at least domain 0.  */
	if (domain_rid_bits_default > implemented_rid_bits - 1)
		domain_rid_bits_default = implemented_rid_bits - 1;

	/* Check for too small values.  */
	if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
		printf("Default domain rid bits %d is too small, use %d\n",
		       domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
		domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
	}

	log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

	printf("Maximum number of domains: %d; %d RID bits per domain\n",
	       (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
	       domain_rid_bits_default);

	mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
	BUG_ON(mp_rid_shift < 3);
}

/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
	int i, j, n_rid_blocks;

	if (ridbits == 0)
		ridbits = domain_rid_bits_default;

	if (ridbits >= IA64_MAX_IMPL_RID_BITS)
		ridbits = IA64_MAX_IMPL_RID_BITS - 1;

	if (ridbits < IA64_MIN_IMPL_RID_BITS)
		ridbits = IA64_MIN_IMPL_RID_BITS;

	// convert to rid_blocks and find one
	n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
		if (ridblock_owner[i] == NULL) {
			for (j = i; j < i + n_rid_blocks; ++j) {
				if (ridblock_owner[j])
					break;
			}
			if (ridblock_owner[j] == NULL)
				break;
		}
	}

	if (i >= MAX_RID_BLOCKS)
		return 0;

	// found an unused block:
	// (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
	// mark this block as owned
	for (j = i; j < i + n_rid_blocks; ++j)
		ridblock_owner[j] = d;

	// setup domain struct
	d->arch.rid_bits = ridbits;
	d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
	d->arch.ending_rid = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

	d->arch.starting_mp_rid = i << mp_rid_shift;
	d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

	d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
	d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);

	printf("### domain %p: rid=%x-%x mp_rid=%x\n",
	       d, d->arch.starting_rid, d->arch.ending_rid,
	       d->arch.starting_mp_rid);

	return 1;
}

int deallocate_rid_range(struct domain *d)
{
	int i;
	int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
	int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

	/* Sanity check.  */
	if (d->arch.rid_bits == 0)
		return 1;

	for (i = rid_block_start; i < rid_block_end; ++i) {
		ASSERT(ridblock_owner[i] == d);
		ridblock_owner[i] = NULL;
	}

	d->arch.rid_bits = 0;
	d->arch.starting_rid = 0;
	d->arch.ending_rid = 0;
	d->arch.starting_mp_rid = 0;
	d->arch.ending_mp_rid = 0;
	return 1;
}

static void
set_rr(unsigned long rr, unsigned long rrval)
{
	ia64_set_rr(rr, vmMangleRID(rrval));
	ia64_srlz_d();
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
	struct vcpu *v = current;
	unsigned long rreg = REGION_NUMBER(rr);
	ia64_rr rrv, newrrv, memrrv;
	unsigned long newrid;

	if (val == -1)
		return 1;

	rrv.rrval = val;
	newrrv.rrval = 0;
	newrid = v->arch.starting_rid + rrv.rid;

	if (newrid > v->arch.ending_rid) {
		printk("can't set rr%d to %lx, starting_rid=%x, "
		       "ending_rid=%x, val=%lx\n", (int)rreg, newrid,
		       v->arch.starting_rid, v->arch.ending_rid, val);
		return 0;
	}

#if 0
	memrrv.rrval = rrv.rrval;
	if (rreg == 7) {
		newrrv.rid = newrid;
		newrrv.ve = VHPT_ENABLED_REGION_7;
		newrrv.ps = IA64_GRANULE_SHIFT;
		ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
			     v->arch.privregs);
	} else {
		newrrv.rid = newrid;
		// FIXME? region 6 needs to be uncached for EFI to work
		if (rreg == 6)
			newrrv.ve = VHPT_ENABLED_REGION_7;
		else
			newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
		newrrv.ps = PAGE_SHIFT;
		if (rreg == 0)
			v->arch.metaphysical_saved_rr0 = newrrv.rrval;
		set_rr(rr, newrrv.rrval);
	}
#else
	memrrv.rrval = rrv.rrval;
	newrrv.rid = newrid;
	newrrv.ve = 1;	// VHPT now enabled for region 7!!
	newrrv.ps = PAGE_SHIFT;
	if (rreg == 0) {
		v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
		if (!PSCB(v, metaphysical_mode))
			set_rr(rr, newrrv.rrval);
	} else if (rreg == 7) {
		ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
			     v->arch.privregs, __get_cpu_var(vhpt_paddr),
			     (unsigned long)pal_vaddr);
	} else {
		set_rr(rr, newrrv.rrval);
	}
#endif
	return 1;
}

// set rr0 to the passed rid (for metaphysical mode, so don't use domain offset)
int set_metaphysical_rr0(void)
{
	struct vcpu *v = current;
//	ia64_rr rrv;

//	rrv.ve = 1;	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
	ia64_set_rr(0, v->arch.metaphysical_rr0);
	ia64_srlz_d();
	return 1;
}

void init_all_rr(struct vcpu *v)
{
	ia64_rr rrv;

	rrv.rrval = 0;
	//rrv.rrval = v->domain->arch.metaphysical_rr0;
	rrv.ps = PAGE_SHIFT;
	rrv.ve = 1;
	if (!v->vcpu_info) {
		printf("Stopping in init_all_rr\n");
		dummy();
	}
	VCPU(v, rrs[0]) = -1;
	VCPU(v, rrs[1]) = rrv.rrval;
	VCPU(v, rrs[2]) = rrv.rrval;
	VCPU(v, rrs[3]) = rrv.rrval;
	VCPU(v, rrs[4]) = rrv.rrval;
	VCPU(v, rrs[5]) = rrv.rrval;
	rrv.ve = 0;
	VCPU(v, rrs[6]) = rrv.rrval;
//	v->shared_info->arch.rrs[7] = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */

// loads a thread's region register (0-6) state into
// the real physical region registers.  Returns the
// (possibly mangled) bits to store into rr7
// iff it is different than what is currently in physical
// rr7 (because we have to go to assembly and physical mode
// to change rr7).  If no change to rr7 is required, returns 0.
void load_region_regs(struct vcpu *v)
{
	unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
	// TODO: These probably should be validated
	unsigned long bad = 0;

	if (VCPU(v, metaphysical_mode)) {
		rr0 = v->domain->arch.metaphysical_rr0;
		ia64_set_rr(0x0000000000000000L, rr0);
		ia64_srlz_d();
	} else {
		rr0 = VCPU(v, rrs[0]);
		if (!set_one_rr(0x0000000000000000L, rr0))
			bad |= 1;
	}
	rr1 = VCPU(v, rrs[1]);
	rr2 = VCPU(v, rrs[2]);
	rr3 = VCPU(v, rrs[3]);
	rr4 = VCPU(v, rrs[4]);
	rr5 = VCPU(v, rrs[5]);
	rr6 = VCPU(v, rrs[6]);
	rr7 = VCPU(v, rrs[7]);
	if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
	if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
	if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
	if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
	if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
	if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
	if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
	if (bad)
		panic_domain(0, "load_region_regs: can't set! bad=%lx\n", bad);
}
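The heart of allocate_rid_range() above is a first-fit scan for a naturally aligned, power-of-two run of free entries in ridblock_owner[]. A minimal stand-alone sketch of that scan follows; MIN_BITS, MAX_BLOCKS, owner[], alloc_blocks() and the integer domain ids are simplified stand-ins invented for illustration, not names from the source above, and the run check is written without the original's trailing one-past-the-run read.

#include <stdio.h>

/* Hypothetical, scaled-down parameters for illustration only; the real
   code derives these from PAL and IA64_MIN_IMPL_RID_BITS. */
#define MIN_BITS	4	/* stands in for IA64_MIN_IMPL_RID_BITS */
#define MAX_BLOCKS	16	/* stands in for MAX_RID_BLOCKS */

static int owner[MAX_BLOCKS];	/* 0 = free; nonzero = owning domain id */

/* First-fit search for a naturally aligned run of 2^(ridbits - MIN_BITS)
   free blocks, skipping block 0 (reserved for metaphysical mappings). */
static int alloc_blocks(int id, int ridbits)
{
	int n = 1 << (ridbits - MIN_BITS);	/* run length, a power of two */
	int i, j;

	/* stepping by n keeps every candidate run naturally aligned */
	for (i = n; i < MAX_BLOCKS; i += n) {
		for (j = i; j < i + n; ++j)
			if (owner[j])
				break;
		if (j == i + n) {		/* whole run is free: claim it */
			for (j = i; j < i + n; ++j)
				owner[j] = id;
			return i;		/* first block of the run */
		}
	}
	return -1;				/* no aligned free run left */
}

int main(void)
{
	/* domain 1 gets a 4-block run, domain 2 a 2-block run */
	printf("domain 1 at block %d\n", alloc_blocks(1, MIN_BITS + 2));
	printf("domain 2 at block %d\n", alloc_blocks(2, MIN_BITS + 1));
	return 0;
}

In the real allocator the returned block index i then defines the domain's RID window, starting_rid = i << IA64_MIN_IMPL_RID_BITS and ending_rid = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS, which is why the runs must be naturally aligned: every RID a domain can reach stays inside blocks it owns.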