path: root/tests
Commit message | Author | Age | Files | Lines
* Add another test | Eddie Hung | 2019-12-16 | 1 | -1/+8
* Accidentally commented out tests | Eddie Hung | 2019-12-16 | 1 | -47/+47
* Add unconditional match blocks for force RAM | Eddie Hung | 2019-12-16 | 1 | -0/+9
* Merge blockram tests | Eddie Hung | 2019-12-16 | 3 | -47/+81
* Fixing compiler warning/issues. Moving test script to the correct place | Diego H | 2019-12-16 | 1 | -6/+6
* Removing fixed attribute value to !ramstyle rules | Diego H | 2019-12-15 | 1 | -3238/+0
* Merging attribute rules into a single match block; Adding tests | Diego H | 2019-12-15 | 3 | -0/+3373
* Renaming BRAM memory tests for the sake of uniformity | Diego H | 2019-12-13 | 2 | -6/+6
* Fixing citation in xc7_xcu_brams.txt file. Fixing RAMB36E1 test. | Diego H | 2019-12-12 | 1 | -2/+2
* Adding a note (TODO) in the memory_params.ys check file | Diego H | 2019-12-12 | 1 | -0/+2
* Updating RAMB36E1 thresholds. Adding test for both RAMB18E1/RAMB36E1 | Diego H | 2019-12-12 | 2 | -0/+90
* Merge pull request #1545 from YosysHQ/eddie/ice40_wrapcarry_attr | Eddie Hung | 2019-12-09 | 3 | -23/+136
|\      Preserve SB_CARRY name and attributes when using $__ICE40_CARRY_WRAPPER
| * unmap $__ICE40_CARRY_WRAPPER in test | Eddie Hung | 2019-12-09 | 1 | -1/+21
| * ice40_wrapcarry to really preserve attributes via -unwrap option | Eddie Hung | 2019-12-09 | 1 | -3/+5
| * Drop keep=0 attributes on SB_CARRY | Eddie Hung | 2019-12-06 | 1 | -2/+2
| * Add WIP test for unwrapping $__ICE40_CARRY_WRAPPER | Eddie Hung | 2019-12-05 | 1 | -0/+30
| * Check SB_CARRY name also preserved | Eddie Hung | 2019-12-03 | 1 | -0/+1
| * Add testcase | Eddie Hung | 2019-12-03 | 1 | -0/+60
* | tests: arch: xilinx: Change order of arguments in macc.sh | Jan Kowalewski | 2019-12-06 | 1 | -1/+1
* | iopadmap: Refactor and fix tristate buffer mapping. (#1527) | Marcin Kościelnicki | 2019-12-04 | 1 | -0/+99
|/      The previous code for rerouting wires when inserting tristate buffers was overcomplicated and didn't handle all cases correctly (in particular, only cell connections were rewired — internal connections were not).
* Merge pull request #1524 from pepijndevos/gowindffinit | Clifford Wolf | 2019-12-03 | 3 | -2/+301
|\      Gowin: add and test DFF init values
| * update test | Pepijn de Vos | 2019-12-03 | 1 | -2/+3
| * Use -match-init to not synth contradicting init values | Pepijn de Vos | 2019-12-03 | 1 | -10/+12
| * attempt to fix formatting | Pepijn de Vos | 2019-11-25 | 1 | -138/+138
| * gowin: add and test dff init values | Pepijn de Vos | 2019-11-25 | 2 | -0/+296
* | abc9: Fix breaking of SCCs | David Shah | 2019-12-01 | 1 | -0/+6
| |     Signed-off-by: David Shah <dave@ds0.me>
* | Merge pull request #1536 from YosysHQ/eddie/xilinx_dsp_muladd | Eddie Hung | 2019-11-27 | 1 | -0/+69
|\ \    xilinx_dsp: consider sign and zero-extension when packing post-multiplier adder
| * | No need for -abc9 | Eddie Hung | 2019-11-26 | 1 | -1/+1
| * | Add citation | Eddie Hung | 2019-11-26 | 1 | -0/+1
| * | Add testcase derived from fastfir_dynamictaps benchmark | Eddie Hung | 2019-11-26 | 1 | -0/+68
* | | Merge pull request #1534 from YosysHQ/mwk/opt_share-fix | Clifford Wolf | 2019-11-27 | 1 | -0/+13
|\ \ \  opt_share: Fix handling of fine cells.
| * | | opt_share: Fix handling of fine cells. | Marcin Kościelnicki | 2019-11-27 | 1 | -0/+13
| |/ /  Fixes #1525.
* / / Remove notes | Eddie Hung | 2019-11-26 | 1 | -9/+0
|/ /
* | clkbufmap: Add support for inverters in clock path. | Marcin Kościelnicki | 2019-11-25 | 1 | -5/+16
* | xilinx: Use INV instead of LUT1 when applicable | Marcin Kościelnicki | 2019-11-25 | 4 | -8/+8
|/
* Merge pull request #1511 from YosysHQ/dave/always | Clifford Wolf | 2019-11-22 | 1 | -0/+63
|\      sv: Error checking for always_comb, always_latch and always_ff
| * sv: Add tests for SV always types | David Shah | 2019-11-21 | 1 | -0/+63
| |     Signed-off-by: David Shah <dave@ds0.me>
* | gowin: Remove show command from tests. | Marcin Kościelnicki | 2019-11-22 | 1 | -1/+0
|/
* Merge pull request #1449 from pepijndevos/gowin | Clifford Wolf | 2019-11-19 | 12 | -0/+248
|\      Improvements for gowin support
| * Merge branch 'master' of https://github.com/YosysHQ/yosys into gowin | Pepijn de Vos | 2019-11-16 | 5 | -17/+34
| |\
| * | fix fsm test with proper clock enable polarity | Pepijn de Vos | 2019-11-11 | 1 | -0/+11
| * | fix wide luts | Pepijn de Vos | 2019-11-06 | 1 | -7/+10
| * | don't cound exact luts in big muxes; futile and fragile | Pepijn de Vos | 2019-10-30 | 1 | -3/+0
| * | add tristate buffer and test | Pepijn de Vos | 2019-10-28 | 1 | -0/+13
| * | do not use wide luts in testcase | Pepijn de Vos | 2019-10-28 | 1 | -3/+3
| * | ALU sim tweaks | Pepijn de Vos | 2019-10-24 | 1 | -2/+2
| * | Add some tests | Pepijn de Vos | 2019-10-21 | 10 | -0/+224
| | |   Copied from Efinix.
| | |   * fsm is broken
| | |   * latch and tribuf are not implemented yet
| | |   * memory maps to dram
* | | Fix #1462, #1480. | Marcin Kościelnicki | 2019-11-19 | 2 | -0/+29
* | | Fix #1496. | Marcin Kościelnicki | 2019-11-18 | 1 | -0/+13
| |/
|/|
* | Fixed tests | Miodrag Milanovic | 2019-11-11 | 5 | -17/+34
|/
struct xmem_pool {
    /* First level bitmap (REAL_FLI bits) */
    u32 fl_bitmap;

    /* Second level bitmap */
    u32 sl_bitmap[REAL_FLI];

    /* Free lists */
    struct bhdr *matrix[REAL_FLI][MAX_SLI];

    spinlock_t lock;

    unsigned long init_size;
    unsigned long max_size;
    unsigned long grow_size;

    /* Basic stats */
    unsigned long used_size;
    unsigned long num_regions;

    /* User provided functions for expanding/shrinking pool */
    xmem_pool_get_memory *get_mem;
    xmem_pool_put_memory *put_mem;

    struct list_head list;

    void *init_region;
    char name[MAX_POOL_NAME_LEN];
};

/*
 * Helping functions
 */

/**
 * Returns indexes (fl, sl) of the list used to serve request of size r
 */
static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
{
    int t;

    if ( *r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = *r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        t = (1 << (fls(*r) - 1 - MAX_LOG2_SLI)) - 1;
        *r = *r + t;
        *fl = fls(*r) - 1;
        *sl = (*r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
        /*if ((*fl -= FLI_OFFSET) < 0) // FL will be always >0!
         *fl = *sl = 0;
         */
        *r &= ~t;
    }
}

/**
 * Returns indexes (fl, sl) which is used as starting point to search
 * for a block of size r. It also rounds up requested size(r) to the
 * next list.
 */
static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
{
    if ( r < SMALL_BLOCK )
    {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    }
    else
    {
        *fl = fls(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}

/**
 * Returns first block from a list that hold blocks larger than or
 * equal to the one pointed by the indexes (fl, sl)
 */
static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
                                               int *sl)
{
    u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
    struct bhdr *b = NULL;

    if ( tmp )
    {
        *sl = ffs(tmp) - 1;
        b = p->matrix[*fl][*sl];
    }
    else
    {
        *fl = ffs(p->fl_bitmap & (~0 << (*fl + 1))) - 1;
        if ( likely(*fl > 0) )
        {
            *sl = ffs(p->sl_bitmap[*fl]) - 1;
            b = p->matrix[*fl][*sl];
        }
    }

    return b;
}

/**
 * Remove first free block(b) from free list with indexes (fl, sl).
 */
static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p,
                                     int fl, int sl)
{
    p->matrix[fl][sl] = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] )
    {
        p->matrix[fl][sl]->ptr.free_ptr.prev = NULL;
    }
    else
    {
        clear_bit(sl, &p->sl_bitmap[fl]);
        if ( !p->sl_bitmap[fl] )
            clear_bit(fl, &p->fl_bitmap);
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Removes block(b) from free list with indexes (fl, sl)
 */
static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p,
                                 int fl, int sl)
{
    if ( b->ptr.free_ptr.next )
        b->ptr.free_ptr.next->ptr.free_ptr.prev = b->ptr.free_ptr.prev;
    if ( b->ptr.free_ptr.prev )
        b->ptr.free_ptr.prev->ptr.free_ptr.next = b->ptr.free_ptr.next;
    if ( p->matrix[fl][sl] == b )
    {
        p->matrix[fl][sl] = b->ptr.free_ptr.next;
        if ( !p->matrix[fl][sl] )
        {
            clear_bit(sl, &p->sl_bitmap[fl]);
            if ( !p->sl_bitmap[fl] )
                clear_bit(fl, &p->fl_bitmap);
        }
    }
    b->ptr.free_ptr = (struct free_ptr) {NULL, NULL};
}

/**
 * Insert block(b) in free list with indexes (fl, sl)
 */
static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p,
                                int fl, int sl)
{
    b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
    if ( p->matrix[fl][sl] )
        p->matrix[fl][sl]->ptr.free_ptr.prev = b;
    p->matrix[fl][sl] = b;
    set_bit(sl, &p->sl_bitmap[fl]);
    set_bit(fl, &p->fl_bitmap);
}

/**
 * Region is a virtually contiguous memory region and Pool is
 * collection of such regions
 */
static inline void ADD_REGION(void *region, unsigned long region_size,
                              struct xmem_pool *pool)
{
    int fl, sl;
    struct bhdr *b, *lb;

    b = (struct bhdr *)(region);
    b->prev_hdr = NULL;
    b->size = ROUNDDOWN_SIZE(region_size - 2 * BHDR_OVERHEAD)
        | FREE_BLOCK | PREV_USED;
    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);
    INSERT_BLOCK(b, pool, fl, sl);
    /* The sentinel block: allows us to know when we're in the last block */
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    pool->used_size += BHDR_OVERHEAD; /* only sentinel block is "used" */
    pool->num_regions++;
}

/*
 * TLSF pool-based allocator start.
 */

struct xmem_pool *xmem_pool_create(
    const char *name,
    xmem_pool_get_memory get_mem,
    xmem_pool_put_memory put_mem,
    unsigned long init_size,
    unsigned long max_size,
    unsigned long grow_size)
{
    struct xmem_pool *pool;
    int pool_bytes, pool_order;

    BUG_ON(max_size && (max_size < init_size));

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);

    pool = (void *)alloc_xenheap_pages(pool_order, 0);
    if ( pool == NULL )
        return NULL;
    memset(pool, 0, pool_bytes);

    /* Round to next page boundary */
    init_size = ROUNDUP_PAGE(init_size);
    max_size = ROUNDUP_PAGE(max_size);
    grow_size = ROUNDUP_PAGE(grow_size);

    /* pool global overhead not included in used size */
    pool->used_size = 0;

    pool->init_size = init_size;
    pool->max_size = max_size;
    pool->grow_size = grow_size;
    pool->get_mem = get_mem;
    pool->put_mem = put_mem;
    strlcpy(pool->name, name, sizeof(pool->name));

    /* always obtain init_region lazily now to ensure it is get_mem'd
     * in the same "context" as all other regions */

    spin_lock_init(&pool->lock);

    spin_lock(&pool_list_lock);
    list_add_tail(&pool->list, &pool_list_head);
    spin_unlock(&pool_list_lock);

    return pool;
}

unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
{
    return pool->used_size;
}

unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
{
    unsigned long total;
    total = ROUNDUP_SIZE(sizeof(*pool))
        + pool->init_size
        + (pool->num_regions - 1) * pool->grow_size;
    return total;
}

void xmem_pool_destroy(struct xmem_pool *pool)
{
    int pool_bytes, pool_order;

    if ( pool == NULL )
        return;

    /* User is destroying without ever allocating from this pool */
    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
    {
        ASSERT(!pool->init_region);
        pool->used_size -= BHDR_OVERHEAD;
    }

    /* Check for memory leaks in this pool */
    if ( xmem_pool_get_used_size(pool) )
        printk("memory leak in pool: %s (%p). "
               "%lu bytes still in use.\n",
               pool->name, pool, xmem_pool_get_used_size(pool));

    spin_lock(&pool_list_lock);
    list_del_init(&pool->list);
    spin_unlock(&pool_list_lock);

    pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
    pool_order = get_order_from_bytes(pool_bytes);
    free_xenheap_pages(pool, pool_order);
}

void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
{
    struct bhdr *b, *b2, *next_b, *region;
    int fl, sl;
    unsigned long tmp_size;

    if ( pool->init_region == NULL )
    {
        if ( (region = pool->get_mem(pool->init_size)) == NULL )
            goto out;
        ADD_REGION(region, pool->init_size, pool);
        pool->init_region = region;
    }

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
    /* Rounding up the requested size and calculating fl and sl */

    spin_lock(&pool->lock);
 retry_find:
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching a free block */
    if ( !(b = FIND_SUITABLE_BLOCK(pool, &fl, &sl)) )
    {
        /* Not found */
        if ( size > (pool->grow_size - 2 * BHDR_OVERHEAD) )
            goto out_locked;
        if ( pool->max_size && (pool->init_size +
                                pool->num_regions * pool->grow_size
                                > pool->max_size) )
            goto out_locked;
        spin_unlock(&pool->lock);
        if ( (region = pool->get_mem(pool->grow_size)) == NULL )
            goto out;
        spin_lock(&pool->lock);
        ADD_REGION(region, pool->grow_size, pool);
        goto retry_find;
    }
    EXTRACT_BLOCK_HDR(b, pool, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);

    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE_MASK) - size;
    if ( tmp_size >= sizeof(struct bhdr) )
    {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);

        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        b2->prev_hdr = b;

        next_b->prev_hdr = b2;

        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, pool, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    }
    else
    {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    pool->used_size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;

    spin_unlock(&pool->lock);
    return (void *)b->ptr.buffer;

    /* Failed alloc */
 out_locked:
    spin_unlock(&pool->lock);

 out:
    return NULL;
}

void xmem_pool_free(void *ptr, struct xmem_pool *pool)
{
    struct bhdr *b, *tmp_b;
    int fl = 0, sl = 0;

    if ( unlikely(ptr == NULL) )
        return;

    b = (struct bhdr *)((char *) ptr - BHDR_OVERHEAD);

    spin_lock(&pool->lock);
    b->size |= FREE_BLOCK;
    pool->used_size -= (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    b->ptr.free_ptr = (struct free_ptr) { NULL, NULL};
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    if ( tmp_b->size & FREE_BLOCK )
    {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
    }
    if ( b->size & PREV_FREE )
    {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE_MASK, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, pool, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE_MASK) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE_MASK);
    tmp_b->prev_hdr = b;

    MAPPING_INSERT(b->size & BLOCK_SIZE_MASK, &fl, &sl);

    if ( (b->prev_hdr == NULL) && ((tmp_b->size & BLOCK_SIZE_MASK) == 0) )
    {
        pool->put_mem(b);
        pool->num_regions--;
        pool->used_size -= BHDR_OVERHEAD; /* sentinel block header */
        goto out;
    }

    INSERT_BLOCK(b, pool, fl, sl);

    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
 out:
    spin_unlock(&pool->lock);
}

int xmem_pool_maxalloc(struct xmem_pool *pool)
{
    return pool->grow_size - (2 * BHDR_OVERHEAD);
}

/*
 * Glue for xmalloc().
 */

static struct xmem_pool *xenpool;

static void *xmalloc_pool_get(unsigned long size)
{
    ASSERT(size == PAGE_SIZE);
    return alloc_xenheap_page();
}

static void xmalloc_pool_put(void *p)
{
    free_xenheap_page(p);
}

static void *xmalloc_whole_pages(unsigned long size)
{
    struct bhdr *b;
    unsigned int i, pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
    char *p;

    b = alloc_xenheap_pages(pageorder, 0);
    if ( b == NULL )
        return NULL;

    b->size = PAGE_ALIGN(size + BHDR_OVERHEAD);
    for ( p = (char *)b + b->size, i = 0; i < pageorder; ++i )
        if ( (unsigned long)p & (PAGE_SIZE << i) )
        {
            free_xenheap_pages(p, i);
            p += PAGE_SIZE << i;
        }

    return (void *)b->ptr.buffer;
}

static void tlsf_init(void)
{
    INIT_LIST_HEAD(&pool_list_head);
    spin_lock_init(&pool_list_lock);
    xenpool = xmem_pool_create(
        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
        PAGE_SIZE, 0, PAGE_SIZE);
    BUG_ON(!xenpool);
}

/*
 * xmalloc()
 */

void *_xmalloc(unsigned long size, unsigned long align)
{
    void *p = NULL;
    u32 pad;

    ASSERT(!in_irq());
    ASSERT((align & (align - 1)) == 0);
    if ( align < MEM_ALIGN )
        align = MEM_ALIGN;
    size += align - MEM_ALIGN;

    if ( !xenpool )
        tlsf_init();

    if ( size < PAGE_SIZE )
        p = xmem_pool_alloc(size, xenpool);
    if ( p == NULL )
        p = xmalloc_whole_pages(size);

    /* Add alignment padding. */
    if ( (pad = -(long)p & (align - 1)) != 0 )
    {
        char *q = (char *)p + pad;
        struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);

        ASSERT(q > (char *)p);
        b->size = pad | 1;
        p = q;
    }

    ASSERT(((unsigned long)p & (align - 1)) == 0);
    return p;
}

void *_xzalloc(unsigned long size, unsigned long align)
{
    void *p = _xmalloc(size, align);

    return p ? memset(p, 0, size) : p;
}

void xfree(void *p)
{
    struct bhdr *b;

    ASSERT(!in_irq());

    if ( p == NULL )
        return;

    /* Strip alignment padding. */
    b = (struct bhdr *)((char *) p - BHDR_OVERHEAD);
    if ( b->size & 1 )
    {
        p = (char *)p - (b->size & ~1u);
        b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
        ASSERT(!(b->size & 1));
    }

    if ( b->size >= PAGE_SIZE )
    {
        unsigned int i, order = get_order_from_bytes(b->size);

        BUG_ON((unsigned long)b & ((PAGE_SIZE << order) - 1));
        for ( i = 0; ; ++i )
        {
            if ( !(b->size & (PAGE_SIZE << i)) )
                continue;
            b->size -= PAGE_SIZE << i;
            free_xenheap_pages((void *)b + b->size, i);
            if ( i + 1 >= order )
                break;
        }
    }
    else
        xmem_pool_free(p, xenpool);
}
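To make the two-level indexing above easier to follow, here is a small standalone sketch of the (fl, sl) computation performed by MAPPING_INSERT()/MAPPING_SEARCH(). The constants SMALL_BLOCK, MAX_SLI, MAX_LOG2_SLI and FLI_OFFSET are illustrative values assumed for this example (the real definitions live elsewhere in the allocator and are not shown in the listing above), and fls_demo() is a portable stand-in for the kernel's fls(). The program only demonstrates the index arithmetic; it is not part of the allocator.

/*
 * Minimal sketch of TLSF's two-level segregated-fit index computation,
 * mirroring MAPPING_INSERT() above.  Constants are assumed, not taken
 * from this listing.
 */
#include <stdio.h>

#define MAX_LOG2_SLI  5                      /* log2 of sub-lists per class */
#define MAX_SLI       (1 << MAX_LOG2_SLI)    /* sub-lists per size class    */
#define FLI_OFFSET    6                      /* first class starts at 2^6   */
#define SMALL_BLOCK   128                    /* sizes below this use fl = 0 */

/* Portable stand-in for fls(): index of the highest set bit, 1-based. */
static int fls_demo(unsigned long x)
{
    int r = 0;
    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

/* Same arithmetic as MAPPING_INSERT(): size class (fl) and sub-list (sl). */
static void mapping_insert_demo(unsigned long r, int *fl, int *sl)
{
    if (r < SMALL_BLOCK) {
        *fl = 0;
        *sl = r / (SMALL_BLOCK / MAX_SLI);
    } else {
        *fl = fls_demo(r) - 1;
        *sl = (r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI;
        *fl -= FLI_OFFSET;
    }
}

int main(void)
{
    static const unsigned long sizes[] = { 32, 200, 1000, 4096 };
    unsigned int i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        int fl, sl;
        mapping_insert_demo(sizes[i], &fl, &sl);
        /* e.g. 200 bytes -> fl = 1 (class 128..255), sl = 18 (200..203) */
        printf("size %4lu -> fl = %d, sl = %2d\n", sizes[i], fl, sl);
    }
    return 0;
}

With these assumed constants, a 200-byte request maps to first-level class 1 (sizes 128..255) and second-level list 18 (sizes 200..203), which is the list FIND_SUITABLE_BLOCK() starts searching from before falling back to the bitmaps for a larger class.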