path: root/techlibs/anlogic/cells_sim.v
blob: 0fba435720456eb2d161249d789d72461fe7b906
module AL_MAP_SEQ (
	output reg q,
	input ce,
	input clk,
	input sr,
	input d
);
	parameter DFFMODE = "FF";   // "FF" or "LATCH"
	parameter REGSET = "RESET"; // "RESET" or "SET"
	parameter SRMUX = "SR";     // "SR" or "INV"
	parameter SRMODE = "SYNC";  // "SYNC" or "ASYNC"

	wire clk_ce;
	assign clk_ce = ce ? clk : 1'b0;
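	// With ce low, clk_ce is held at 1'b0, so the flip-flop modes see no
	// clock edges and the latch modes are opaque to d.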

	wire srmux;
	generate
		case (SRMUX)
			"SR": assign srmux = sr;
			"INV": assign srmux = ~sr;
			default: assign srmux = sr;
		endcase
	endgenerate

	wire regset;
	generate
		case (REGSET)
			"RESET": assign regset = 1'b0;
			"SET": assign regset = 1'b1;
			default: assign regset = 1'b0;
		endcase
	endgenerate

	initial q = regset;

	generate
		if (DFFMODE == "FF")
		begin
			if (SRMODE == "ASYNC")
			begin
				always @(posedge clk_ce, posedge srmux)
					if (srmux)
						q <= regset;
					else
						q <= d;
			end
			else
			begin
				always @(posedge clk_ce)
					if (srmux)
						q <= regset;
					else
						q <= d;
			end
		end
		else
		begin
			// DFFMODE == "LATCH": level-sensitive, transparent while
			// clk_ce is high.
			if (SRMODE == "ASYNC")
			begin
				// Asynchronous set/reset overrides the latch enable.
				always @*
					if (srmux)
						q <= regset;
					else if (clk_ce)
						q <= d;
			end
			else
			begin
				// Synchronous set/reset: only sampled while transparent.
				always @*
					if (clk_ce)
						q <= srmux ? regset : d;
			end
		end
	endgenerate
endmodule
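
// Example: instantiating AL_MAP_SEQ as a D flip-flop with a synchronous,
// active-high reset (a usage sketch; instance and net names are
// illustrative):
//
//   AL_MAP_SEQ #(
//       .DFFMODE("FF"),
//       .REGSET("RESET"),
//       .SRMUX("SR"),
//       .SRMODE("SYNC")
//   ) u_dff (
//       .q(q), .ce(1'b1), .clk(clk), .sr(rst), .d(d)
//   );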

module AL_MAP_LUT1 (
	output o,
	input a
);
	parameter [1:0] INIT = 2'h0;
	parameter EQN = "(A)";

	assign o = a ? INIT[1] : INIT[0];	
endmodule

module AL_MAP_LUT2 (
	output o,
	input a,
	input b
);
	parameter [3:0] INIT = 4'h0;
	parameter EQN = "(A)";

	wire [1:0] s1 = b ? INIT[ 3:2] : INIT[1:0];
	assign o = a ? s1[1] : s1[0];	
endmodule
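
// The INIT parameter of these LUT cells holds the truth table, indexed by
// the input vector: o = INIT[{b, a}] for AL_MAP_LUT2, and likewise for the
// wider LUTs. For example, a 2-input AND gate sets only the {b, a} = 2'b11
// entry (a sketch; instance and net names are illustrative):
//
//   AL_MAP_LUT2 #(.INIT(4'h8)) u_and (.o(o), .a(a), .b(b));
//
// The EQN parameter is a textual equation string; it is not used by these
// simulation models.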

module AL_MAP_LUT3 (
	output o,
	input a,
	input b,
	input c
);
	parameter [7:0] INIT = 8'h0;
	parameter EQN = "(A)";

	wire [3:0] s2 = c ? INIT[ 7:4] : INIT[3:0];
	wire [1:0] s1 = b ?   s2[ 3:2] :   s2[1:0];
	assign o = a ? s1[1] : s1[0];	
endmodule

module AL_MAP_LUT4 (
	output o,
	input a,
	input b,
	input c,
	input d
);
	parameter [15:0] INIT = 16'h0;
	parameter EQN = "(A)";

	wire [7:0] s3 = d ? INIT[15:8] : INIT[7:0];
	wire [3:0] s2 = c ?   s3[ 7:4] :   s3[3:0];
	wire [1:0] s1 = b ?   s2[ 3:2] :   s2[1:0];
	assign o = a ? s1[1] : s1[0];	
endmodule

module AL_MAP_LUT5 (
	output o,
	input a,
	input b,
	input c,
	input d,
	input e
);
	parameter [31:0] INIT = 32'h0;
	parameter EQN = "(A)";
	assign o = INIT[{e, d, c, b, a}];
endmodule


module AL_MAP_LUT6 (
	output o,
	input a,
	input b,
	input c,
	input d,
	input e,
	input f
);
	parameter [63:0] INIT = 64'h0;
	parameter EQN = "(A)";
	assign o = INIT[{f, e, d, c, b, a}];
endmodule

module AL_MAP_ALU2B (
	input cin,
	input a0, b0, c0, d0,
	input a1, b1, c1, d1,
	output s0, s1, cout
);
	parameter [15:0] INIT0 = 16'h0000;
	parameter [15:0] INIT1 = 16'h0000;
	parameter FUNC0 = "NO";
	parameter FUNC1 = "NO";
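
	// No simulation behavior is modeled for this cell; it is kept as a
	// parameterized black box for the two-bit ALU slice.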
endmodule

module AL_MAP_ADDER (
	input a,
	input b,
	input c,
	output [1:0] o
);
	parameter ALUTYPE = "ADD";
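
	// o[0] carries the sum/difference bit and o[1] the carry (or borrow)
	// out of this stage; the *_CARRY types appear to be carry-chain
	// injection cells, feeding the "a" input (inverted for "SUB_CARRY")
	// straight to o[1].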

	generate
		case (ALUTYPE)
			"ADD":          assign o = a + b + c;
			"SUB":          assign o = a - b - c;
			"A_LE_B":       assign o = a - b - c;

			"ADD_CARRY":    assign o = {  a, 1'b0 };
			"SUB_CARRY":    assign o = { ~a, 1'b0 };
			"A_LE_B_CARRY": assign o = {  a, 1'b0 };
			default:        assign o = a + b + c;
		endcase
	endgenerate

endmodule
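
// Example: two bits of a ripple-carry adder built from AL_MAP_ADDER cells
// (a sketch only; instance and net names are illustrative):
//
//   wire [1:0] ci, s0, s1;
//   AL_MAP_ADDER #(.ALUTYPE("ADD_CARRY")) u_ci (.a(cin),  .b(1'b0), .c(1'b0),  .o(ci));
//   AL_MAP_ADDER #(.ALUTYPE("ADD"))       u_b0 (.a(x[0]), .b(y[0]), .c(ci[1]), .o(s0));
//   AL_MAP_ADDER #(.ALUTYPE("ADD"))       u_b1 (.a(x[1]), .b(y[1]), .c(s0[1]), .o(s1));
//   // sum = {s1[0], s0[0]}, carry out = s1[1]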
class="o">< end_idx ) alloc_bitmap[curr_idx] = ~0UL; alloc_bitmap[curr_idx] |= (1UL<<end_off)-1; } } static void map_free(unsigned long first_page, unsigned long nr_pages) { unsigned long start_off, end_off, curr_idx, end_idx; #ifndef NDEBUG unsigned long i; /* Check that the block isn't already freed. */ for ( i = 0; i < nr_pages; i++ ) ASSERT(allocated_in_map(first_page + i)); #endif curr_idx = first_page / PAGES_PER_MAPWORD; start_off = first_page & (PAGES_PER_MAPWORD-1); end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD; end_off = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1); if ( curr_idx == end_idx ) { alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1); } else { alloc_bitmap[curr_idx] &= (1UL<<start_off)-1; while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0; alloc_bitmap[curr_idx] &= -(1UL<<end_off); } } /************************* * BOOT-TIME ALLOCATOR */ /* Initialise allocator to handle up to @max_page pages. */ unsigned long init_boot_allocator(unsigned long bitmap_start) { bitmap_start = round_pgup(bitmap_start); /* Allocate space for the allocation bitmap. */ bitmap_size = max_page / 8; bitmap_size = round_pgup(bitmap_size); alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start); /* All allocated by default. */ memset(alloc_bitmap, ~0, bitmap_size); return bitmap_start + bitmap_size; } void init_boot_pages(unsigned long ps, unsigned long pe) { unsigned long bad_pfn; char *p; ps = round_pgup(ps); pe = round_pgdown(pe); map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT); /* Check new pages against the bad-page list. */ p = opt_badpage; while ( *p != '\0' ) { bad_pfn = simple_strtoul(p, &p, 0); if ( *p == ',' ) p++; else if ( *p != '\0' ) break; if ( (bad_pfn < (bitmap_size*8)) && !allocated_in_map(bad_pfn) ) { printk("Marking page %p as bad\n", bad_pfn); map_alloc(bad_pfn, 1); } } } unsigned long alloc_boot_pages(unsigned long size, unsigned long align) { unsigned long pg, i; size = round_pgup(size) >> PAGE_SHIFT; align = round_pgup(align) >> PAGE_SHIFT; for ( pg = 0; (pg + size) < (bitmap_size*8); pg += align ) { for ( i = 0; i < size; i++ ) if ( allocated_in_map(pg + i) ) break; if ( i == size ) { map_alloc(pg, size); return pg << PAGE_SHIFT; } } return 0; } /************************* * BINARY BUDDY ALLOCATOR */ #define MEMZONE_XEN 0 #define MEMZONE_DOM 1 #define NR_ZONES 2 /* Up to 2^10 pages can be allocated at once. */ #define MAX_ORDER 10 static struct list_head heap[NR_ZONES][MAX_ORDER+1]; static unsigned long avail[NR_ZONES]; static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED; void end_boot_allocator(void) { unsigned long i, j; int curr_free = 0, next_free = 0; memset(avail, 0, sizeof(avail)); for ( i = 0; i < NR_ZONES; i++ ) for ( j = 0; j <= MAX_ORDER; j++ ) INIT_LIST_HEAD(&heap[i][j]); /* Pages that are free now go to the domain sub-allocator. */ for ( i = 0; i < max_page; i++ ) { curr_free = next_free; next_free = !allocated_in_map(i+1); if ( next_free ) map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */ if ( curr_free ) free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0); } } /* Hand the specified arbitrary page range to the specified heap zone. */ void init_heap_pages( unsigned int zone, struct pfn_info *pg, unsigned long nr_pages) { unsigned long i; ASSERT(zone < NR_ZONES); for ( i = 0; i < nr_pages; i++ ) free_heap_pages(zone, pg+i, 0); } /* Allocate 2^@order contiguous pages. 
/* Allocate 2^@order contiguous pages. */
struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
{
    int i;
    struct pfn_info *pg;

    ASSERT(zone < NR_ZONES);

    if ( unlikely(order > MAX_ORDER) )
        return NULL;

    spin_lock(&heap_lock);

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i <= MAX_ORDER; i++ )
        if ( !list_empty(&heap[zone][i]) )
            goto found;

    /* No suitable memory blocks. Fail the request. */
    spin_unlock(&heap_lock);
    return NULL;

 found:
    pg = list_entry(heap[zone][i].next, struct pfn_info, list);
    list_del(&pg->list);

    /* We may have to halve the chunk a number of times. */
    while ( i != order )
    {
        PFN_ORDER(pg) = --i;
        list_add_tail(&pg->list, &heap[zone][i]);
        pg += 1 << i;
    }

    map_alloc(page_to_pfn(pg), 1 << order);
    avail[zone] -= 1 << order;

    spin_unlock(&heap_lock);

    return pg;
}

/* Free 2^@order set of pages. */
void free_heap_pages(
    unsigned int zone, struct pfn_info *pg, unsigned int order)
{
    unsigned long mask;

    ASSERT(zone < NR_ZONES);
    ASSERT(order <= MAX_ORDER);

    spin_lock(&heap_lock);

    map_free(page_to_pfn(pg), 1 << order);
    avail[zone] += 1 << order;

    /* Merge chunks as far as possible. */
    while ( order < MAX_ORDER )
    {
        mask = 1 << order;

        if ( (page_to_pfn(pg) & mask) )
        {
            /* Merge with predecessor block? */
            if ( allocated_in_map(page_to_pfn(pg)-mask) ||
                 (PFN_ORDER(pg-mask) != order) )
                break;
            list_del(&(pg-mask)->list);
            pg -= mask;
        }
        else
        {
            /* Merge with successor block? */
            if ( allocated_in_map(page_to_pfn(pg)+mask) ||
                 (PFN_ORDER(pg+mask) != order) )
                break;
            list_del(&(pg+mask)->list);
        }

        order++;
    }

    PFN_ORDER(pg) = order;
    list_add_tail(&pg->list, &heap[zone][order]);

    spin_unlock(&heap_lock);
}

/*
 * Scrub all unallocated pages in all heap zones. This function is more
 * convoluted than appears necessary because we do not want to continuously
 * hold the lock or disable interrupts while scrubbing very large memory areas.
 */
void scrub_heap_pages(void)
{
    void *p;
    unsigned long pfn, flags;

    printk("Scrubbing Free RAM: ");

    for ( pfn = 0; pfn < (bitmap_size * 8); pfn++ )
    {
        /* Every 100MB, print a progress dot and appease the watchdog. */
        if ( (pfn % ((100*1024*1024)/PAGE_SIZE)) == 0 )
        {
            printk(".");
            touch_nmi_watchdog();
        }

        /* Quick lock-free check. */
        if ( allocated_in_map(pfn) )
            continue;

        spin_lock_irqsave(&heap_lock, flags);

        /* Re-check page status with lock held. */
        if ( !allocated_in_map(pfn) )
        {
            p = map_domain_mem(pfn << PAGE_SHIFT);
            clear_page(p);
            unmap_domain_mem(p);
        }

        spin_unlock_irqrestore(&heap_lock, flags);
    }

    printk("done.\n");
}

/*************************
 * XEN-HEAP SUB-ALLOCATOR
 */

void init_xenheap_pages(unsigned long ps, unsigned long pe)
{
    unsigned long flags;

    ps = round_pgup(ps);
    pe = round_pgdown(pe);

    memguard_guard_range(__va(ps), pe - ps);

    local_irq_save(flags);