author     Alessandro Comodi <acomodi@antmicro.com>   2021-06-02 09:49:30 +0200
committer  Alessandro Comodi <acomodi@antmicro.com>   2021-06-11 11:19:01 +0200
commit     104536b7aae5970ae1d1e95394f26fbf04603d12 (patch)
tree       0f1fad9a952f272e6436456077fe54ba3a7730ea /fpga_interchange
parent     7278d3c0edbc6f92ef4c69d7c5db66e811c7e9c4 (diff)
interchange: add support for generating BEL clusters
Clustering greatly helps the placer to identify and pack together specific cells at the same site (e.g. LUT+FF), or cells that are chained through dedicated interconnections (e.g. carry chains).

Signed-off-by: Alessandro Comodi <acomodi@antmicro.com>
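As a reading aid, the overrides declared in arch.h and implemented in the new arch_pack_clusters.cc below follow nextpnr's generic cluster API, which the placer queries whenever it tries to move a cluster root. A minimal sketch of that placer-side flow, assuming the usual nextpnr Context; the helper name try_place_cluster and the STRENGTH_WEAK binding are illustrative choices, not part of this commit:

    #include "nextpnr.h"

    NEXTPNR_NAMESPACE_BEGIN

    // Sketch only: place an entire cluster once a candidate BEL for its root is known.
    static bool try_place_cluster(Context *ctx, ClusterId cluster, BelId root_bel)
    {
        CellInfo *root = ctx->getClusterRootCell(cluster);
        if (!ctx->isValidBelForCellType(root->type, root_bel))
            return false;

        // getClusterPlacement() expands the whole chain (root, chained cells and the
        // cells attached to each cluster node's ports) into cell -> BEL assignments.
        std::vector<std::pair<CellInfo *, BelId>> placement;
        if (!ctx->getClusterPlacement(cluster, root_bel, placement))
            return false;

        for (auto &cell_bel : placement)
            ctx->bindBel(cell_bel.second, cell_bel.first, STRENGTH_WEAK);
        return true;
    }

    NEXTPNR_NAMESPACE_END

getClusterOffset() and isClusterStrict(), also implemented below, let the placer estimate where the chained cells will land relative to the root before committing to such a placement.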
Diffstat (limited to 'fpga_interchange')
-rw-r--r--  fpga_interchange/arch.cc                   1
-rw-r--r--  fpga_interchange/arch.h                   55
-rw-r--r--  fpga_interchange/arch_pack_clusters.cc   584
-rw-r--r--  fpga_interchange/chipdb.h                 28
-rw-r--r--  fpga_interchange/examples/tests.cmake      7
-rw-r--r--  fpga_interchange/fpga_interchange.cpp      6
-rw-r--r--  fpga_interchange/site_router.cc           46
-rw-r--r--  fpga_interchange/site_routing_cache.cc     8
-rw-r--r--  fpga_interchange/site_routing_cache.h      2
9 files changed, 713 insertions(+), 24 deletions(-)
diff --git a/fpga_interchange/arch.cc b/fpga_interchange/arch.cc
index 8e7fe2a3..1bbe9a02 100644
--- a/fpga_interchange/arch.cc
+++ b/fpga_interchange/arch.cc
@@ -758,6 +758,7 @@ bool Arch::pack()
pack_ports();
pack_default_conns();
expand_macros();
+ pack_cluster();
return true;
}
diff --git a/fpga_interchange/arch.h b/fpga_interchange/arch.h
index c8a61430..83471167 100644
--- a/fpga_interchange/arch.h
+++ b/fpga_interchange/arch.h
@@ -94,6 +94,15 @@ struct TileStatus
PseudoPipModel pseudo_pip_model;
};
+struct Cluster
+{
+ uint32_t index;
+ CellInfo *root;
+ std::vector<CellInfo *> cluster_nodes;
+ dict<IdString, IdString> cell_cluster_node_map;
+ dict<IdString, std::vector<std::pair<IdString, CellInfo *>>> cluster_node_cells;
+};
+
struct Arch : ArchAPI<ArchRanges>
{
boost::iostreams::mapped_file_source blob_file;
@@ -258,6 +267,20 @@ struct Arch : ArchAPI<ArchRanges>
map_cell_pins(cell, mapping, /*bind_constants=*/false);
}
constraints.bindBel(tile_status.tags.data(), get_cell_constraints(bel, cell->type));
+
+ // Clean previous cell placement in tile
+ if (cell->bel != BelId()) {
+ TileStatus &prev_tile_status = get_tile_status(cell->bel.tile);
+ NPNR_ASSERT(prev_tile_status.boundcells[cell->bel.index] != nullptr);
+
+ const auto &prev_bel_data = bel_info(chip_info, cell->bel);
+ NPNR_ASSERT(prev_bel_data.category == BEL_CATEGORY_LOGIC);
+
+ get_site_status(prev_tile_status, prev_bel_data).unbindBel(cell);
+ prev_tile_status.boundcells[cell->bel.index] = nullptr;
+
+ constraints.unbindBel(prev_tile_status.tags.data(), get_cell_constraints(cell->bel, cell->type));
+ }
} else {
map_port_pins(bel, cell);
// FIXME: Probably need to actually constraint io port cell/bel,
@@ -687,7 +710,14 @@ struct Arch : ArchAPI<ArchRanges>
void place_iobufs(WireId pad_wire, NetInfo *net, const pool<CellInfo *, hash_ptr_ops> &tightly_attached_bels,
pool<CellInfo *, hash_ptr_ops> *placed_cells);
+
void pack_ports();
+
+ // Clusters
+ void pack_cluster();
+ void prepare_cluster(const ClusterPOD *cluster, uint32_t index);
+ dict<ClusterId, Cluster> clusters;
+
void decode_lut_cells();
const GlobalCellPOD *global_cell_info(IdString cell_type) const;
@@ -821,10 +851,10 @@ struct Arch : ArchAPI<ArchRanges>
}
const TileStatus &tile_status = iter->second;
const CellInfo *cell = tile_status.boundcells[bel.index];
+
if (cell != nullptr) {
- if (!dedicated_interconnect.isBelLocationValid(bel, cell)) {
+ if (cell->cluster == ClusterId() && !dedicated_interconnect.isBelLocationValid(bel, cell))
return false;
- }
if (io_port_types.count(cell->type)) {
// FIXME: Probably need to actually constraint io port cell/bel,
@@ -837,24 +867,21 @@ struct Arch : ArchAPI<ArchRanges>
return false;
}
}
+
// Still check site status if cell is nullptr; as other bels in the site could be illegal (for example when
// dedicated paths can no longer be used after ripping up a cell)
auto &bel_data = bel_info(chip_info, bel);
- return get_site_status(tile_status, bel_data).checkSiteRouting(getCtx(), tile_status);
- }
+ bool site_status = get_site_status(tile_status, bel_data).checkSiteRouting(getCtx(), tile_status);
- // -------------------------------------------------
+ return site_status;
+ }
- // TODO
- CellInfo *getClusterRootCell(ClusterId cluster) const override { NPNR_ASSERT_FALSE("unimplemented"); }
- ArcBounds getClusterBounds(ClusterId cluster) const override { NPNR_ASSERT_FALSE("unimplemented"); }
- Loc getClusterOffset(const CellInfo *cell) const override { NPNR_ASSERT_FALSE("unimplemented"); }
- bool isClusterStrict(const CellInfo *cell) const override { NPNR_ASSERT_FALSE("unimplemented"); }
+ CellInfo *getClusterRootCell(ClusterId cluster) const override;
+ ArcBounds getClusterBounds(ClusterId cluster) const override;
+ Loc getClusterOffset(const CellInfo *cell) const override;
+ bool isClusterStrict(const CellInfo *cell) const override;
bool getClusterPlacement(ClusterId cluster, BelId root_bel,
- std::vector<std::pair<CellInfo *, BelId>> &placement) const override
- {
- NPNR_ASSERT_FALSE("unimplemented");
- }
+ std::vector<std::pair<CellInfo *, BelId>> &placement) const override;
IdString get_bel_tiletype(BelId bel) const { return IdString(loc_info(chip_info, bel).name); }
diff --git a/fpga_interchange/arch_pack_clusters.cc b/fpga_interchange/arch_pack_clusters.cc
new file mode 100644
index 00000000..96940be4
--- /dev/null
+++ b/fpga_interchange/arch_pack_clusters.cc
@@ -0,0 +1,584 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 Symbiflow Authors
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "design_utils.h"
+#include "log.h"
+#include "nextpnr.h"
+#include "arch.h"
+#include "util.h"
+
+#include <boost/algorithm/string.hpp>
+#include <queue>
+
+NEXTPNR_NAMESPACE_BEGIN
+
+enum ClusterWireNodeState
+{
+ IN_SINK_SITE = 0,
+ IN_ROUTING = 1,
+ IN_SOURCE_SITE = 2,
+ ONLY_IN_SOURCE_SITE = 3
+};
+
+enum ExpansionDirection
+{
+ CLUSTER_UPHILL_DIR = 0,
+ CLUSTER_DOWNHILL_DIR = 1
+};
+
+struct ClusterWireNode {
+ WireId wire;
+ ClusterWireNodeState state;
+ int depth;
+};
+
+static void handle_expansion_node(const Context *ctx, WireId prev_wire, PipId pip, ClusterWireNode curr_node, std::vector<ClusterWireNode> &nodes_to_expand, pool<BelId> &bels, ExpansionDirection direction)
+{
+ WireId wire;
+
+ if (direction == CLUSTER_UPHILL_DIR)
+ wire = ctx->getPipSrcWire(pip);
+ else
+ wire = ctx->getPipDstWire(pip);
+
+ if (wire == WireId())
+ return;
+
+ ClusterWireNode next_node;
+ next_node.wire = wire;
+ next_node.depth = curr_node.depth;
+
+ if (next_node.depth >= 2)
+ return;
+
+ auto const &wire_data = ctx->wire_info(wire);
+
+ bool expand_node = true;
+ if (ctx->is_site_port(pip)) {
+ switch (curr_node.state) {
+ case ONLY_IN_SOURCE_SITE:
+ expand_node = false;
+ break;
+ case IN_SOURCE_SITE:
+ NPNR_ASSERT(wire_data.site == -1);
+ next_node.state = IN_ROUTING;
+ break;
+ case IN_ROUTING:
+ NPNR_ASSERT(wire_data.site != -1);
+ next_node.state = IN_SINK_SITE;
+ break;
+ case IN_SINK_SITE:
+ expand_node = false;
+ break;
+ default:
+ // Unreachable!!!
+ NPNR_ASSERT(false);
+ }
+ } else {
+ if (next_node.state == IN_ROUTING)
+ next_node.depth++;
+ next_node.state = curr_node.state;
+ }
+
+ if (expand_node)
+ nodes_to_expand.push_back(next_node);
+ else
+ return;
+
+ if (next_node.state == IN_SINK_SITE || next_node.state == ONLY_IN_SOURCE_SITE) {
+ for (BelPin bel_pin : ctx->getWireBelPins(wire)) {
+ BelId bel = bel_pin.bel;
+ auto const &bel_data = bel_info(ctx->chip_info, bel);
+
+ if (bels.count(bel))
+ continue;
+
+ if (bel_data.category != BEL_CATEGORY_LOGIC)
+ return;
+
+ if (bel_data.synthetic)
+ return;
+
+ if (direction == CLUSTER_UPHILL_DIR) {
+ // Check that the BEL is indeed the one reached by backward exploration,
+ // by checking the previous visited wire.
+ for (IdString check_pin : ctx->getBelPins(bel)) {
+ if (prev_wire == ctx->getBelPinWire(bel, check_pin)) {
+ bels.insert(bel);
+ break;
+ }
+ }
+ } else {
+ bels.insert(bel);
+ }
+ }
+ }
+
+ return;
+}
+
+
+static pool<BelId> find_cluster_bels(const Context *ctx, WireId wire, ExpansionDirection direction, bool allow_out_of_site_expansion = false)
+{
+ std::vector<ClusterWireNode> nodes_to_expand;
+ pool<BelId> bels;
+
+ const auto &wire_data = ctx->wire_info(wire);
+ NPNR_ASSERT(wire_data.site != -1);
+
+ ClusterWireNode wire_node;
+ wire_node.wire = wire;
+ wire_node.state = IN_SOURCE_SITE;
+ if (!allow_out_of_site_expansion)
+ wire_node.state = ONLY_IN_SOURCE_SITE;
+ wire_node.depth = 0;
+
+ nodes_to_expand.push_back(wire_node);
+
+ while (!nodes_to_expand.empty()) {
+ ClusterWireNode node_to_expand = nodes_to_expand.back();
+ WireId prev_wire = node_to_expand.wire;
+ nodes_to_expand.pop_back();
+
+ if (direction == CLUSTER_DOWNHILL_DIR) {
+ for (PipId pip : ctx->getPipsDownhill(node_to_expand.wire)) {
+ if (ctx->is_pip_synthetic(pip))
+ continue;
+
+ handle_expansion_node(ctx, prev_wire, pip, node_to_expand, nodes_to_expand, bels, direction);
+ }
+ } else {
+ NPNR_ASSERT(direction == CLUSTER_UPHILL_DIR);
+ for (PipId pip : ctx->getPipsUphill(node_to_expand.wire)) {
+ if (ctx->is_pip_synthetic(pip))
+ continue;
+
+ handle_expansion_node(ctx, prev_wire, pip, node_to_expand, nodes_to_expand, bels, direction);
+ }
+ }
+ }
+
+ return bels;
+}
+
+CellInfo* Arch::getClusterRootCell(ClusterId cluster) const
+{
+ NPNR_ASSERT(cluster != ClusterId());
+ return clusters.at(cluster).root;
+}
+
+bool Arch::getClusterPlacement(ClusterId cluster, BelId root_bel,
+ std::vector<std::pair<CellInfo *, BelId>> &placement) const
+{
+ const Context *ctx = getCtx();
+ const Cluster &packed_cluster = clusters.at(cluster);
+
+ IdString GND = id("GND");
+ IdString VCC = id("VCC");
+
+ // Place root
+ CellInfo *root_cell = getClusterRootCell(cluster);
+
+ if (!ctx->isValidBelForCellType(root_cell->type, root_bel))
+ return false;
+
+ BelId next_bel;
+
+ // Place cluster
+ for (CellInfo *cluster_node : packed_cluster.cluster_nodes) {
+ if (cluster_node == root_cell) {
+ next_bel = root_bel;
+ } else {
+ auto &cluster_data = cluster_info(chip_info, packed_cluster.index);
+
+ IdString next_bel_pin(cluster_data.chainable_ports[0].bel_source);
+ WireId next_bel_pin_wire = ctx->getBelPinWire(next_bel, next_bel_pin);
+ next_bel = BelId();
+ for (BelId bel : find_cluster_bels(ctx, next_bel_pin_wire, CLUSTER_DOWNHILL_DIR, true)) {
+ if (ctx->isValidBelForCellType(cluster_node->type, bel)) {
+ next_bel = bel;
+ break;
+ }
+ }
+
+ if (next_bel == BelId())
+ return false;
+ }
+
+ if (cluster_node->cell_bel_pins.empty()) {
+ int32_t mapping = bel_info(chip_info, next_bel).pin_map[get_cell_type_index(cluster_node->type)];
+ NPNR_ASSERT(mapping >= 0);
+
+ const CellBelMapPOD &cell_pin_map = chip_info->cell_map->cell_bel_map[mapping];
+ for (const auto &pin_map : cell_pin_map.common_pins) {
+ IdString cell_pin(pin_map.cell_pin);
+ IdString bel_pin(pin_map.bel_pin);
+
+ // Skip assigned LUT pins, as they are already mapped!
+ if (cluster_node->lut_cell.lut_pins.count(cell_pin) && cluster_node->cell_bel_pins.count(cell_pin))
+ continue;
+
+ if (cell_pin == GND || cell_pin == VCC)
+ continue;
+
+ cluster_node->cell_bel_pins[cell_pin].push_back(bel_pin);
+ }
+ }
+
+ placement.emplace_back(cluster_node, next_bel);
+
+ // Place cluster node cells
+ for (auto port_cell : packed_cluster.cluster_node_cells.at(cluster_node->name)) {
+ bool placed_cell = false;
+
+ IdString port = port_cell.first;
+ CellInfo *cell = port_cell.second;
+
+ PortType port_type = cluster_node->ports.at(port).type;
+
+ if (port_type == PORT_INOUT)
+ continue;
+
+ auto &cell_bel_pins = cluster_node->cell_bel_pins.at(port);
+ for (auto &bel_pin : cell_bel_pins) {
+ WireId bel_pin_wire = ctx->getBelPinWire(next_bel, bel_pin);
+
+ ExpansionDirection direction = port_type == PORT_IN ? CLUSTER_UPHILL_DIR : CLUSTER_DOWNHILL_DIR;
+ pool<BelId> cluster_bels = find_cluster_bels(ctx, bel_pin_wire, direction);
+
+ if (cluster_bels.size() == 0)
+ continue;
+
+ for (BelId bel : cluster_bels) {
+ if (ctx->isValidBelForCellType(cell->type, bel)) {
+ placement.emplace_back(cell, bel);
+ placed_cell = true;
+ break;
+ }
+ }
+
+ if (placed_cell)
+ break;
+ }
+
+ if (!placed_cell)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+ArcBounds Arch::getClusterBounds(ClusterId cluster) const
+{
+ // TODO: Implement this
+ ArcBounds bounds(0, 0, 0, 0);
+ return bounds;
+}
+
+Loc Arch::getClusterOffset(const CellInfo *cell) const
+{
+ Loc offset;
+ CellInfo *root = getClusterRootCell(cell->cluster);
+
+ if (cell->bel != BelId() && root->bel != BelId()) {
+ Loc root_loc = getBelLocation(root->bel);
+ Loc cell_loc = getBelLocation(cell->bel);
+ offset.x = cell_loc.x - root_loc.x;
+ offset.y = cell_loc.y - root_loc.y;
+ offset.z = cell_loc.z - root_loc.z;
+ } else {
+ Cluster cluster = clusters.at(cell->cluster);
+ auto &cluster_data = cluster_info(chip_info, cluster.index);
+
+ if (cluster_data.chainable_ports.size() == 0)
+ return offset;
+
+ auto &chainable_port = cluster_data.chainable_ports[0];
+
+ IdString cluster_node = cluster.cell_cluster_node_map.at(cell->name);
+ CellInfo *cluster_node_cell = cells.at(cluster_node).get();
+
+ auto res = std::find(cluster.cluster_nodes.begin(), cluster.cluster_nodes.end(), cluster_node_cell);
+ NPNR_ASSERT(res != cluster.cluster_nodes.end());
+
+ auto distance = std::distance(cluster.cluster_nodes.begin(), res);
+
+ offset.x = chainable_port.avg_x_offset * distance;
+ offset.y = chainable_port.avg_y_offset * distance;
+ }
+
+ return offset;
+}
+
+bool Arch::isClusterStrict(const CellInfo *cell) const
+{
+ return true;
+}
+
+void dump_clusters(const ChipInfoPOD *chip_info, Context *ctx)
+{
+ for (size_t i = 0; i < chip_info->clusters.size(); ++i) {
+ const auto &cluster = chip_info->clusters[i];
+ IdString cluster_name(cluster.name);
+ log_info("Cluster '%s' loaded! Parameters:\n", cluster_name.c_str(ctx));
+
+ log_info(" - root cell types:\n");
+ for (auto cell : cluster.root_cell_types)
+ log_info(" - %s\n", IdString(cell).c_str(ctx));
+
+ for (auto chain_ports : cluster.chainable_ports)
+ log_info(" - chainable pair: source %s - sink %s\n",
+ IdString(chain_ports.cell_source).c_str(ctx),
+ IdString(chain_ports.cell_sink).c_str(ctx));
+
+ if (cluster.cluster_cells_map.size() != 0)
+ log_info(" - cell port maps:\n");
+ for (auto cluster_cell : cluster.cluster_cells_map) {
+ log_info(" - cell: %s - port: %s\n",
+ IdString(cluster_cell.cell).c_str(ctx),
+ IdString(cluster_cell.port).c_str(ctx));
+ }
+ }
+}
+
+
+static bool check_cluster_cells_compatibility(CellInfo *old_cell, CellInfo *new_cell, pool<IdString> &exclude_nets)
+{
+ NPNR_ASSERT(new_cell->type == old_cell->type);
+ for (auto &new_port_pair : new_cell->ports) {
+ PortInfo new_port_info = new_port_pair.second;
+ PortInfo old_port_info = old_cell->ports.at(new_port_pair.first);
+
+ if (exclude_nets.count(new_port_info.net->name))
+ continue;
+
+ if (new_port_info.type != PORT_IN)
+ continue;
+
+ if (new_port_info.net != old_port_info.net)
+ return false;
+ }
+
+ return true;
+}
+
+
+void Arch::prepare_cluster(const ClusterPOD *cluster, uint32_t index)
+{
+ Context *ctx = getCtx();
+ IdString cluster_name(cluster->name);
+
+ pool<IdString> cluster_cell_types;
+ for (auto cell_type : cluster->root_cell_types)
+ cluster_cell_types.insert(IdString(cell_type));
+
+ // Find cluster roots
+ std::vector<CellInfo *> roots;
+ for (auto &cell : cells) {
+ CellInfo *ci = cell.second.get();
+
+ if (ci->cluster != ClusterId())
+ continue;
+
+ if (!cluster_cell_types.count(ci->type))
+ continue;
+
+ if (cluster->chainable_ports.size() == 0) {
+ ci->cluster.set(ctx, ci->name.str(ctx));
+ roots.push_back(ci);
+ continue;
+ }
+
+ // Only one type of dedicated interconnect is allowed.
+ auto chain_ports = cluster->chainable_ports[0];
+ IdString source_port(chain_ports.cell_source);
+ IdString sink_port(chain_ports.cell_sink);
+
+ PortRef driver = ci->ports[sink_port].net->driver;
+
+ if (driver.cell == nullptr || driver.port != source_port) {
+ // We hit a root cell
+ ci->cluster.set(ctx, ci->name.c_str(ctx));
+ roots.push_back(ci);
+
+ // Chained cells use dedicated connections, usually not exposed to the
+ // general interconnect resources. The port disconnection is required for
+ // sink ports which are connected to GND or VCC by default, which are not
+ // reachable due to the fixed dedicated interconnect.
+ // E.g.: The CI input of carry chains in 7series corresponds to the CIN bel port,
+ // which can only be connected to the COUT output of the tile below.
+ disconnect_port(ctx, ci, sink_port);
+ }
+ }
+
+ dict<IdString, pool<IdString>> port_cell_maps;
+ for (auto cell_port_map : cluster->cluster_cells_map) {
+ IdString cell(cell_port_map.cell);
+ IdString port(cell_port_map.port);
+
+ pool<IdString> cells_pool({cell});
+
+ port_cell_maps.emplace(port, cells_pool).first->second.insert(cell);
+ }
+
+ // Generate unique clusters starting from each root
+ for (auto root : roots) {
+ Cluster cluster_info;
+ cluster_info.root = root;
+ cluster_info.index = index;
+
+ CellInfo *next_cluster_node = root;
+ if (ctx->verbose)
+ log_info(" - forming cluster starting from root cell: %s\n", next_cluster_node->name.c_str(ctx));
+
+ // counter to determine whether this cluster needs to exist
+ uint32_t count_cluster_cells = 0;
+ do {
+ std::vector<std::pair<IdString, CellInfo *>> cluster_cells;
+
+ // type -> cells map to verify compatibility of cells in the same cluster
+ dict<IdString, CellInfo *> cell_type_dict;
+ pool<IdString> exclude_nets;
+
+ count_cluster_cells++;
+
+ for (auto port : next_cluster_node->ports) {
+ if (!port_cell_maps.count(port.first))
+ continue;
+
+ PortInfo port_info = port.second;
+
+ if (port_info.type == PORT_OUT) {
+ exclude_nets.insert(port_info.net->name);
+ auto &users = port_info.net->users;
+ if (users.size() != 1)
+ continue;
+
+ CellInfo *user_cell = users[0].cell;
+ if (user_cell == nullptr)
+ continue;
+
+ if (!port_cell_maps.at(port.first).count(user_cell->type))
+ continue;
+
+ auto res = cell_type_dict.emplace(user_cell->type, user_cell);
+ bool compatible = true;
+ if (!res.second)
+ compatible = check_cluster_cells_compatibility(res.first->second, user_cell, exclude_nets);
+
+ if (!compatible) {
+ log_info("Not compatible! %s %s\n", user_cell->name.c_str(ctx), port_info.net->name.c_str(ctx));
+ continue;
+ }
+
+ user_cell->cluster = root->cluster;
+ cluster_cells.push_back(std::make_pair(port.first, user_cell));
+ cluster_info.cell_cluster_node_map.emplace(user_cell->name, next_cluster_node->name);
+ count_cluster_cells++;
+
+ if (ctx->verbose)
+ log_info(" - adding user cell: %s\n", user_cell->name.c_str(ctx));
+
+
+ } else if (port_info.type == PORT_IN) {
+ auto &driver = port_info.net->driver;
+ auto &users = port_info.net->users;
+ if (users.size() != 1)
+ continue;
+
+ CellInfo *driver_cell = driver.cell;
+ if (driver_cell == nullptr)
+ continue;
+
+ if (!port_cell_maps.at(port.first).count(driver_cell->type))
+ continue;
+
+ driver_cell->cluster = root->cluster;
+ cluster_cells.push_back(std::make_pair(port.first, driver_cell));
+ cluster_info.cell_cluster_node_map.emplace(driver_cell->name, next_cluster_node->name);
+ count_cluster_cells++;
+
+ if (ctx->verbose)
+ log_info(" - adding driver cell: %s\n", driver_cell->name.c_str(ctx));
+ }
+ }
+
+ cluster_info.cell_cluster_node_map.emplace(next_cluster_node->name, next_cluster_node->name);
+ cluster_info.cluster_nodes.push_back(next_cluster_node);
+ cluster_info.cluster_node_cells.emplace(next_cluster_node->name, cluster_cells);
+
+ if (cluster->chainable_ports.size() == 0)
+ break;
+
+ // Only one type of dedicated interconnect is allowed.
+ auto chain_ports = cluster->chainable_ports[0];
+ IdString source_port(chain_ports.cell_source);
+ IdString sink_port(chain_ports.cell_sink);
+
+ NetInfo *next_net = next_cluster_node->ports.at(source_port).net;
+
+ if (next_net == nullptr)
+ continue;
+
+ next_cluster_node = nullptr;
+ for (auto &user : next_net->users) {
+ CellInfo *user_cell = user.cell;
+
+ if (user_cell == nullptr)
+ continue;
+
+ if (cluster_cell_types.count(user_cell->type)) {
+ user_cell->cluster = root->cluster;
+ next_cluster_node = user_cell;
+ break;
+ }
+ }
+
+ if (next_cluster_node == nullptr)
+ break;
+
+ } while (true);
+
+ if (count_cluster_cells == 1 && cluster->chainable_ports.size() == 0) {
+ root->cluster = ClusterId();
+ continue;
+ }
+
+ clusters.emplace(root->cluster, cluster_info);
+ }
+}
+
+void Arch::pack_cluster()
+{
+ Context *ctx = getCtx();
+
+ if (ctx->verbose)
+ dump_clusters(chip_info, ctx);
+
+ for (uint32_t i = 0; i < chip_info->clusters.size(); ++i) {
+ const auto &cluster = chip_info->clusters[i];
+
+ // Build clusters and find roots
+ prepare_cluster(&cluster, i);
+ }
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/fpga_interchange/chipdb.h b/fpga_interchange/chipdb.h
index 155c2bb2..78d9c1c5 100644
--- a/fpga_interchange/chipdb.h
+++ b/fpga_interchange/chipdb.h
@@ -402,6 +402,27 @@ NPNR_PACKED_STRUCT(struct MacroExpansionPOD {
RelSlice<MacroParamMapRulePOD> param_rules;
});
+NPNR_PACKED_STRUCT(struct ClusterCellPortPOD {
+ uint32_t cell;
+ uint32_t port;
+});
+
+NPNR_PACKED_STRUCT(struct ChainablePortPOD {
+ uint32_t cell_source;
+ uint32_t cell_sink;
+ uint32_t bel_source;
+ uint32_t bel_sink;
+ int16_t avg_x_offset;
+ int16_t avg_y_offset;
+});
+
+NPNR_PACKED_STRUCT(struct ClusterPOD {
+ uint32_t name;
+ RelSlice<uint32_t> root_cell_types;
+ RelSlice<ChainablePortPOD> chainable_ports;
+ RelSlice<ClusterCellPortPOD> cluster_cells_map;
+});
+
NPNR_PACKED_STRUCT(struct ChipInfoPOD {
RelPtr<char> name;
RelPtr<char> generator;
@@ -421,6 +442,8 @@ NPNR_PACKED_STRUCT(struct ChipInfoPOD {
RelSlice<MacroPOD> macros;
RelSlice<MacroExpansionPOD> macro_rules;
+ RelSlice<ClusterPOD> clusters;
+
// BEL bucket constids.
RelSlice<int32_t> bel_buckets;
@@ -460,6 +483,11 @@ inline const SiteInstInfoPOD &site_inst_info(const ChipInfoPOD *chip_info, int32
return chip_info->sites[chip_info->tiles[tile].sites[site]];
}
+inline const ClusterPOD &cluster_info(const ChipInfoPOD *chip_info, int32_t cluster)
+{
+ return chip_info->clusters[cluster];
+}
+
enum SyntheticType
{
NOT_SYNTH = 0,
diff --git a/fpga_interchange/examples/tests.cmake b/fpga_interchange/examples/tests.cmake
index 3c97fe26..48b1cee3 100644
--- a/fpga_interchange/examples/tests.cmake
+++ b/fpga_interchange/examples/tests.cmake
@@ -77,13 +77,14 @@ function(add_interchange_test)
# Synthesis
set(synth_json ${CMAKE_CURRENT_BINARY_DIR}/${name}.json)
+ set(synth_log ${CMAKE_CURRENT_BINARY_DIR}/${name}.json.log)
add_custom_command(
OUTPUT ${synth_json}
COMMAND ${CMAKE_COMMAND} -E env
SOURCES="${sources}"
OUT_JSON=${synth_json}
TECHMAP=${techmap}
- yosys -c ${tcl}
+ yosys -c ${tcl} -l ${synth_log}
DEPENDS ${sources} ${techmap} ${tcl}
)
@@ -134,6 +135,7 @@ function(add_interchange_test)
get_property(chipdb_bin_loc TARGET device-${device} PROPERTY CHIPDB_BIN_LOC)
set(phys ${CMAKE_CURRENT_BINARY_DIR}/${name}.phys)
+ set(phys_log ${CMAKE_CURRENT_BINARY_DIR}/${name}.phys.log)
add_custom_command(
OUTPUT ${phys}
COMMAND
@@ -143,6 +145,7 @@ function(add_interchange_test)
--netlist ${netlist}
--phys ${phys}
--package ${package}
+ --log ${phys_log}
DEPENDS
nextpnr-fpga_interchange
${netlist}
@@ -151,6 +154,7 @@ function(add_interchange_test)
${chipdb_bin_loc}
)
+ set(phys_verbose_log ${CMAKE_CURRENT_BINARY_DIR}/${name}.phys.verbose.log)
add_custom_target(
test-${family}-${name}-phys-verbose
COMMAND
@@ -161,6 +165,7 @@ function(add_interchange_test)
--phys ${phys}
--package ${package}
--verbose
+ --log ${phys_verbose_log}
DEPENDS
${netlist}
${xdc}
diff --git a/fpga_interchange/fpga_interchange.cpp b/fpga_interchange/fpga_interchange.cpp
index 1d08b128..ac2a7e96 100644
--- a/fpga_interchange/fpga_interchange.cpp
+++ b/fpga_interchange/fpga_interchange.cpp
@@ -539,10 +539,14 @@ void FpgaInterchange::write_physical_netlist(const Context * ctx, const std::str
auto net_iter = nets.begin();
for(auto & net_pair : ctx->nets) {
auto &net = *net_pair.second;
- auto net_out = *net_iter++;
const CellInfo *driver_cell = net.driver.cell;
+ if (driver_cell == nullptr)
+ continue;
+
+ auto net_out = *net_iter++;
+
// Handle GND and VCC nets.
if(driver_cell->bel == ctx->get_gnd_bel()) {
IdString gnd_net_name(ctx->chip_info->constants->gnd_net_name);
diff --git a/fpga_interchange/site_router.cc b/fpga_interchange/site_router.cc
index 090b9342..cae81d52 100644
--- a/fpga_interchange/site_router.cc
+++ b/fpga_interchange/site_router.cc
@@ -60,7 +60,9 @@ bool check_initial_wires(const Context *ctx, SiteInformation *site_info)
if (!cell->ports.count(pin_pair.first))
continue;
const PortInfo &port = cell->ports.at(pin_pair.first);
- NPNR_ASSERT(port.net != nullptr);
+
+ if (port.net == nullptr)
+ continue;
for (IdString bel_pin_name : pin_pair.second) {
BelPin bel_pin;
@@ -297,7 +299,11 @@ struct SiteExpansionLoop
// already unroutable!
solution.clear();
solution.store_solution(ctx, node_storage, net->driver, completed_routes);
- solution.verify(ctx, *net);
+ bool verify = solution.verify(ctx, *net);
+
+ if (!verify)
+ return false;
+
for (size_t route : completed_routes) {
SiteWire wire = node_storage->get_node(route)->wire;
targets.erase(wire);
@@ -1086,6 +1092,40 @@ static void block_lut_outputs(SiteArch *site_arch, const pool<std::pair<IdString
}
}
+// Block the dedicated chain input wires (e.g. CIN of a carry chain) of cluster root cells
+// to prevent the site router from using them.
+static void block_cluster_wires(SiteArch *site_arch)
+{
+ const Context *ctx = site_arch->site_info->ctx;
+ auto &cells_in_site = site_arch->site_info->cells_in_site;
+
+ for (auto &cell : cells_in_site) {
+ if (cell->cluster == ClusterId())
+ continue;
+
+ if (ctx->getClusterRootCell(cell->cluster) != cell)
+ continue;
+
+ Cluster cluster = ctx->clusters.at(cell->cluster);
+
+ uint32_t cluster_id = cluster.index;
+ auto &cluster_data = cluster_info(ctx->chip_info, cluster_id);
+
+ if (cluster_data.chainable_ports.size() == 0)
+ continue;
+
+ IdString cluster_chain_input(cluster_data.chainable_ports[0].cell_sink);
+
+ if (cluster_chain_input == IdString())
+ continue;
+
+ auto &cell_bel_pins = cell->cell_bel_pins.at(cluster_chain_input);
+ for (auto &bel_pin : cell_bel_pins) {
+ SiteWire bel_pin_wire = site_arch->getBelPinWire(cell->bel, bel_pin);
+ site_arch->bindWire(bel_pin_wire, &site_arch->blocking_site_net);
+ }
+ }
+}
+
// Recursively visit downhill PIPs until a SITE_PORT_SINK is reached.
// Marks all PIPs for all valid paths.
static bool visit_downhill_pips(const SiteArch *site_arch, const SiteWire &site_wire, std::vector<PipId> &valid_pips)
@@ -1205,6 +1245,7 @@ bool SiteRouter::checkSiteRouting(const Context *ctx, const TileStatus &tile_sta
// site_arch.archcheck();
block_lut_outputs(&site_arch, blocked_wires);
+ block_cluster_wires(&site_arch);
// Do a detailed routing check to see if the site has at least 1 valid
// routing solution.
@@ -1264,6 +1305,7 @@ void SiteRouter::bindSiteRouting(Context *ctx)
SiteArch site_arch(&site_info);
block_lut_outputs(&site_arch, blocked_wires);
+ block_cluster_wires(&site_arch);
NPNR_ASSERT(route_site(&site_arch, &ctx->site_routing_cache, &ctx->node_storage, /*explain=*/false));
check_routing(site_arch);
diff --git a/fpga_interchange/site_routing_cache.cc b/fpga_interchange/site_routing_cache.cc
index 512ca2ac..cd16cfff 100644
--- a/fpga_interchange/site_routing_cache.cc
+++ b/fpga_interchange/site_routing_cache.cc
@@ -68,7 +68,7 @@ void SiteRoutingSolution::store_solution(const SiteArch *ctx, const RouteNodeSto
solution_offsets.push_back(solution_storage.size());
}
-void SiteRoutingSolution::verify(const SiteArch *ctx, const SiteNetInfo &net)
+bool SiteRoutingSolution::verify(const SiteArch *ctx, const SiteNetInfo &net)
{
pool<SiteWire> seen_users;
for (size_t i = 0; i < num_solutions(); ++i) {
@@ -88,7 +88,7 @@ void SiteRoutingSolution::verify(const SiteArch *ctx, const SiteNetInfo &net)
NPNR_ASSERT(net.driver == cursor);
}
- NPNR_ASSERT(seen_users.size() == net.users.size());
+ return seen_users.size() == net.users.size();
}
SiteRoutingKey SiteRoutingKey::make(const SiteArch *ctx, const SiteNetInfo &site_net)
@@ -194,9 +194,7 @@ bool SiteRoutingCache::get_solution(const SiteArch *ctx, const SiteNetInfo &net,
}
}
- solution->verify(ctx, net);
-
- return true;
+ return solution->verify(ctx, net);
}
void SiteRoutingCache::add_solutions(const SiteArch *ctx, const SiteNetInfo &net, const SiteRoutingSolution &solution)
diff --git a/fpga_interchange/site_routing_cache.h b/fpga_interchange/site_routing_cache.h
index b4baf65a..a430d206 100644
--- a/fpga_interchange/site_routing_cache.h
+++ b/fpga_interchange/site_routing_cache.h
@@ -32,7 +32,7 @@ struct SiteRoutingSolution
{
void store_solution(const SiteArch *ctx, const RouteNodeStorage *node_storage, const SiteWire &driver,
std::vector<size_t> solutions);
- void verify(const SiteArch *ctx, const SiteNetInfo &net);
+ bool verify(const SiteArch *ctx, const SiteNetInfo &net);
void clear()
{