author    Maciej Dudek <mdudek@antmicro.com>    2021-08-30 11:12:49 +0200
committer Maciej Dudek <mdudek@antmicro.com>    2021-09-23 15:43:23 +0200
commit    fdcfe8cd8188b6c4ea2450843bd22d822856a091 (patch)
tree      d23b1e69f725e9cd3cda0f45162014b75eb36840 /fpga_interchange/arch_pack_clusters.cc
parent    d9a71083e1a081f89e1aa4c357bc3b828eea6709 (diff)
Adding support for MacroCells
Diffstat (limited to 'fpga_interchange/arch_pack_clusters.cc')
-rw-r--r--  fpga_interchange/arch_pack_clusters.cc  341
1 file changed, 338 insertions, 3 deletions
diff --git a/fpga_interchange/arch_pack_clusters.cc b/fpga_interchange/arch_pack_clusters.cc
index f4e50233..3514d7c7 100644
--- a/fpga_interchange/arch_pack_clusters.cc
+++ b/fpga_interchange/arch_pack_clusters.cc
@@ -40,7 +40,8 @@ enum ClusterWireNodeState
enum ExpansionDirection
{
CLUSTER_UPHILL_DIR = 0,
- CLUSTER_DOWNHILL_DIR = 1
+ CLUSTER_DOWNHILL_DIR = 1,
+ CLUSTER_BOTH_DIR = 2
};
struct ClusterWireNode
@@ -370,6 +371,330 @@ static bool check_cluster_cells_compatibility(CellInfo *old_cell, CellInfo *new_
return true;
}
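+
+// AC-3 "revise" step for the arc x -> y of the cluster connection graph.
+// A candidate cell for index x is kept only if some candidate cell for index y
+// matches it on the edges of the x -> y connection (the given pins share a net
+// and the required driver direction holds). Returns true if any cell was
+// removed from domain[x].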
+bool reduce(uint32_t x, uint32_t y, const ClusterPOD *cluster, dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &domain, Context *ctx){
+ bool change = false;
+ std::vector<CellInfo *> remove_cell;
+ uint32_t counter = 0;
+ for (const auto &connection : cluster->connection_graph[x].connections){
+ if(connection.target_idx == y)
+ break;
+ counter++;
+ }
+ for (const auto &x_cell : domain[x]){
+ if (ctx->verbose)
+ log_info("Testing cell %s\n", x_cell->name.c_str(ctx));
+ bool found = false;
+ for (const auto &y_cell : domain[y]){
+ if (ctx->verbose)
+ log_info(" - Y candidate: %s\n", y_cell->name.c_str(ctx));
+ for (const auto edge : cluster->connection_graph[x].connections[counter].edges){
+ if (!x_cell->ports.count(IdString(edge.cell_pin)) || !y_cell->ports.count(IdString(edge.other_cell_pin)))
+ break;
+ const auto x_net = x_cell->ports[IdString(edge.cell_pin)].net;
+ const auto y_net = y_cell->ports[IdString(edge.other_cell_pin)].net;
+
+ if (x_net != y_net)
+ break;
+ bool x_driver = x_net->driver.cell == x_cell;
+ bool y_driver = y_net->driver.cell == y_cell;
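+ // edge.dir selects the required driver: 0 -> y_cell must drive the net,
+ // 1 -> x_cell must drive it, 2 -> neither cell may drive it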
+ if ((edge.dir != 0 || !y_driver) && (edge.dir != 1 || !x_driver) && (edge.dir != 2 || y_driver || x_driver))
+ break;
+ found = true;
+ }
+ if (found){
+ log_info(" - Works for %s\n", y_cell->name.c_str(ctx));
+ break;
+ }
+ }
+ if (!found)
+ remove_cell.push_back(x_cell);
+ }
+
+ for (const auto &cell : remove_cell){
+ domain[x].erase(cell);
+ change = true;
+ }
+
+ return change;
+}
+
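+// AC-3 style constraint propagation: pop arcs (x, y) from the workqueue and
+// prune domain[x] against domain[y]; whenever domain[x] shrinks, re-enqueue
+// the remaining arcs that start at x so they get rechecked.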
+void binary_constraint_check(const ClusterPOD *cluster,
+ std::queue<std::pair<uint32_t, uint32_t>> &workqueue,
+ dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &idx_to_cells, Context *ctx){
+ while (!workqueue.empty()){
+ std::pair<uint32_t, uint32_t> arc = workqueue.front();
+ workqueue.pop();
+ uint32_t x,y;
+ x = arc.first; y = arc.second;
+ if (ctx->verbose)
+ log_info("Checking pair %d:%d\n", x, y);
+ if (reduce(x, y, cluster, idx_to_cells, ctx)){
+ for (const auto &connection : cluster->connection_graph[arc.first].connections)
+ if (connection.target_idx != y)
+ workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+ }
+ }
+}
+
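+// Backtracking search over the remaining ambiguous index -> cell mappings.
+// For an index with more than one candidate, tentatively commit one cell on a
+// copy of the domains, re-run the binary constraint propagation and recurse;
+// on success the copy replaces the current domains, otherwise the next
+// candidate is tried. Returns false when some index has no candidate left.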
+bool back_solver(const ClusterPOD *cluster,
+ dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &idx_to_cells, Context *ctx){
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+ std::queue<uint32_t> prep;
+ for (const auto &arc : idx_to_cells){
+ if (arc.second.size() == 0)
+ return false;
+ if (arc.second.size()>1){
+ for (const auto &cell : arc.second){
+ auto copy_idx_to_cells(idx_to_cells);
+ copy_idx_to_cells[arc.first].clear();
+ for (uint32_t idx : possible_idx[cell]){
+ copy_idx_to_cells[idx].erase(cell);
+ prep.push(idx);
+ }
+ copy_idx_to_cells[arc.first].insert(cell);
+ std::queue<std::pair<uint32_t, uint32_t>> workqueue;
+ while(!prep.empty()){
+ uint32_t idx = prep.front(); prep.pop();
+ for (const auto &connection : cluster->connection_graph[idx].connections)
+ if (arc.first != connection.target_idx)
+ workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+ }
+ binary_constraint_check(cluster, workqueue, copy_idx_to_cells, ctx);
+ if (back_solver(cluster, copy_idx_to_cells, ctx)){
+ idx_to_cells = std::move(copy_idx_to_cells);
+ return true;
+ }
+ }
+ // No candidate cell for this index yields a consistent assignment
+ return false;
+ }
+ }
+ return true;
+}
+
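+// Match the cells of each macro instance against the connection graph of a
+// macro-derived cluster: build per-index candidate domains from unary
+// constraints (cell type, used ports), narrow them with AC-3 propagation over
+// the binary connection constraints, and resolve the remaining ambiguity with
+// the backtracking solver above.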
+void Arch::prepare_macro_cluster(const ClusterPOD *cluster, uint32_t index)
+{
+ Context *ctx = getCtx();
+ IdString cluster_name(cluster->name);
+
+ pool<IdString> cluster_cell_types;
+ for (auto cell_type : cluster->root_cell_types)
+ cluster_cell_types.insert(IdString(cell_type));
+
+ // Find the cluster root for each macro, handling each macro only once
+ dict<IdString, CellInfo *> roots;
+ for (auto &cell : cells){
+ CellInfo *ci = cell.second.get();
+ if(ci->macro_parent == IdString())
+ continue;
+ if(ci->cluster != ClusterId())
+ continue;
+ if (!cluster_cell_types.count(ci->type))
+ continue;
+ if(roots.count(ci->macro_parent))
+ continue;
+ // Simple check based on cell type counting
+ dict<IdString, uint32_t> cells_in_macro, counter;
+ pool<IdString> cell_types;
+ for (auto &cell_type : cluster->required_cells){
+ cells_in_macro[IdString(cell_type.name)] = cell_type.count;
+ cell_types.insert(IdString(cell_type.name));
+ }
+
+ for (auto &node_cell : macro_to_cells[ci->macro_parent]){
+ auto cell_type = node_cell->type;
+ if(!counter.count(cell_type))
+ counter[cell_type] = 0;
+ counter[cell_type]++;
+ cell_types.insert(cell_type);
+ }
+ bool failed = false;
+ for(auto cell_type : cell_types){
+ if(ctx->verbose && cells_in_macro.count(cell_type))
+ log_info("Required: %s %d\n", cell_type.c_str(ctx), cells_in_macro[cell_type]);
+ if(ctx->verbose && cells_in_macro.count(cell_type))
+ log_info("Have: %s %d\n", cell_type.c_str(ctx), counter[cell_type]);
+ if(!cells_in_macro.count(cell_type) || !counter.count(cell_type) || cells_in_macro[cell_type] != counter[cell_type])
+ failed = true;
+ if(failed && ctx->verbose)
+ log_info("Cell count stage failed, for sure not this cluster\n");
+ if(failed)
+ break;
+ }
+ if(failed){
+ roots[ci->macro_parent] = nullptr;
+ continue;
+ }
+
+ // Arc consistency
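+ // Build a domain of candidate macro cells for every connection-graph index,
+ // then prune the domains with unary and binary constraints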
+ dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> idx_to_cells;
+ // First apply unary constraints, such as the candidate cell type and its used ports
+ for (auto &cell : macro_to_cells[ci->macro_parent])
+ for (auto &node : cluster->connection_graph)
+ if (IdString(node.cell_type) == cell->type)
+ if ((node.idx != 0 && cell->name != ci->name) ||
+ (node.idx == 0 && cell->name == ci->name)){
+ idx_to_cells[node.idx].insert(cell);
+ }
+
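+ // Unary pruning on ports: keep only candidates whose ports exactly match the
+ // ports used by the corresponding connection-graph node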
+ for (auto &arc : idx_to_cells){
+ std::vector<CellInfo *> remove_cell;
+ pool<IdString> used_ports;
+ for (const auto &port : cluster->connection_graph[arc.first].used_ports)
+ used_ports.insert(IdString(port.name));
+ for (const auto &cell : arc.second){
+ uint32_t count = 0;
+ for (const auto &port : cell->ports){
+ if (!used_ports.count(port.first)){
+ remove_cell.push_back(cell);
+ break;
+ }
+ count++;
+ }
+ if (count != used_ports.size()){
+ remove_cell.push_back(cell);
+ break;
+ }
+ }
+ for (const auto &cell : remove_cell){
+ arc.second.erase(cell);
+ }
+ }
+ if (ctx->verbose){
+ log_info("After mono constraints are applied\n");
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+
+ for (const auto arc : possible_idx){
+ log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+ for (const auto idx : arc.second)
+ log_info(" - %d\n", idx);
+ }
+ }
+ // Solve for binary constraints
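+ // Seed the workqueue with every arc of the connection graph, then propagate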
+ std::queue<std::pair<uint32_t, uint32_t>> workqueue;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &connection : cluster->connection_graph[arc.first].connections)
+ workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+
+ binary_constraint_check(cluster, workqueue, idx_to_cells, ctx);
+ for (const auto &arc : idx_to_cells){
+ if (arc.second.size() == 0){
+ if (ctx->verbose)
+ log_info("AC-3 failed\n");
+ failed = true;
+ break;
+ }
+ }
+ if (failed)
+ continue;
+
+ if (ctx->verbose){
+ log_info("After AC-3\n");
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+
+ for (const auto arc : possible_idx){
+ log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+ for (const auto idx : arc.second)
+ log_info(" - %d\n", idx);
+ }
+ }
+
+ bool change = false;
+ std::queue<std::pair<uint32_t, CellInfo *>> removequeue;
+ // Keep assigning cells to indices that map to only a single candidate cell.
+ // Remove that cell from the other mappings and recheck the binary constraints.
+ // Fail if some index has no candidate left or some cell has no index assigned.
+ do{
+ change = false;
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ pool<uint32_t> changed_idxs;
+ for (const auto &arc : idx_to_cells){
+ if (arc.second.size() == 0){
+ failed = true;
+ break;
+ }
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+ }
+ if(failed)
+ break;
+ for (auto &cell : macro_to_cells[ci->macro_parent])
+ if (possible_idx[cell].size() == 0){
+ failed = true;
+ break;
+ }
+ if(failed)
+ break;
+ for (const auto &arc : idx_to_cells){
+ if (arc.second.size() == 1)
+ for (const auto &idx : possible_idx[*arc.second.begin()])
+ if (idx != arc.first)
+ removequeue.push(std::pair<uint32_t, CellInfo*>(idx, *arc.second.begin()));
+ }
+ while(!removequeue.empty()){
+ auto t = removequeue.front(); removequeue.pop();
+ uint32_t idx = t.first;
+ CellInfo *cell = t.second;
+ idx_to_cells[idx].erase(cell);
+ change = true;
+ changed_idxs.insert(idx);
+ }
+ for (const uint32_t &idx : changed_idxs)
+ for (const auto &connection : cluster->connection_graph[idx].connections)
+ workqueue.push(std::pair<uint32_t, uint32_t>(idx, connection.target_idx));
+
+ binary_constraint_check(cluster, workqueue, idx_to_cells, ctx);
+ }while(change);
+ if(failed){
+ if(ctx->verbose)
+ log_info("Single cell mapping failed\n");
+ continue;
+ }
+ if (ctx->verbose){
+ log_info("After mapping indices with single cell\n");
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+
+ for (const auto arc : possible_idx){
+ log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+ for (const auto idx : arc.second)
+ log_info(" - %d\n", idx);
+ }
+ }
+ // At this point every index that could only be mapped to a single cell is mapped.
+ // Next, run the backtracking solver to resolve the remaining idx<->cell mappings.
+ if (ctx->verbose)
+ log_info("Back solver\n");
+ if(!back_solver(cluster, idx_to_cells, ctx)){
+ if(ctx->verbose)
+ log_info("Back solver failed\n");
+ continue;
+ }
+ if (ctx->verbose){
+ log_info("Final mapping after back solver\n");
+ dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+ for (const auto &arc : idx_to_cells)
+ for (const auto &cell : arc.second)
+ possible_idx[cell].insert(arc.first);
+
+ for (const auto arc : possible_idx){
+ log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+ for (const auto idx : arc.second)
+ log_info(" - %d\n", idx);
+ }
+ }
+ }
+}
+
void Arch::prepare_cluster(const ClusterPOD *cluster, uint32_t index)
{
Context *ctx = getCtx();
@@ -383,6 +708,8 @@ void Arch::prepare_cluster(const ClusterPOD *cluster, uint32_t index)
std::vector<CellInfo *> roots;
for (auto &cell : cells) {
CellInfo *ci = cell.second.get();
+ if (ci->macro_parent != IdString())
+ continue;
if (ci->cluster != ClusterId())
continue;
@@ -564,9 +891,17 @@ void Arch::pack_cluster()
dump_clusters(chip_info, ctx);
for (uint32_t i = 0; i < chip_info->clusters.size(); ++i) {
- const auto &cluster = chip_info->clusters[i];
+ if (!chip_info->clusters[i].from_macro){
+ const auto &cluster = chip_info->clusters[i];
- prepare_cluster(&cluster, i);
+ prepare_cluster(&cluster, i);
+ } else {
+ const auto &cluster = chip_info->clusters[i];
+ if(ctx->verbose)
+ log_info("%s\n", IdString(cluster.name).c_str(ctx));
+
+ prepare_macro_cluster(&cluster, i);
+ }
}
}