#!/usr/bin/env python3
#
# Copyright (c) 2018 Yousong Zhou
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.

import argparse
import calendar
import datetime
import errno
import fcntl
import hashlib
import json
import os
import os.path
import re
import shutil
import ssl
import subprocess
import sys
import time
import urllib.request

TMPDIR = os.environ.get('TMP_DIR') or '/tmp'
TMPDIR_DL = os.path.join(TMPDIR, 'dl')


class PathException(Exception):
    pass


class DownloadGitHubError(Exception):
    pass


class Path(object):
    """Context class for preparing and cleaning up directories.

    If ``preclean`` is ``False``, ``path`` will NOT be removed on context enter

    If ``path`` ``isdir``, then it will be created on context enter.

    If ``keep`` is True, then ``path`` will NOT be removed on context exit
    """

    def __init__(self, path, isdir=True, preclean=False, keep=False):
        self.path = path
        self.isdir = isdir
        self.preclean = preclean
        self.keep = keep

    def __enter__(self):
        if self.preclean:
            self.rm_all(self.path)
        if self.isdir:
            self.mkdir_all(self.path)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.keep:
            self.rm_all(self.path)

    @staticmethod
    def mkdir_all(path):
        """Same as mkdir -p."""
        # os.path.split() yields (head, tail); creating head first means at
        # most one missing intermediate level is handled.
        names = os.path.split(path)
        p = ''
        for name in names:
            p = os.path.join(p, name)
            Path._mkdir(p)

    @staticmethod
    def _rmdir_dir(dir_):
        # Recursively remove directory contents, then the directory itself.
        names = Path._listdir(dir_)
        for name in names:
            p = os.path.join(dir_, name)
            Path.rm_all(p)
        Path._rmdir(dir_)

    @staticmethod
    def _mkdir(path):
        Path._os_func(os.mkdir, path, errno.EEXIST)

    @staticmethod
    def _rmdir(path):
        Path._os_func(os.rmdir, path, errno.ENOENT)

    @staticmethod
    def _remove(path):
        Path._os_func(os.remove, path, errno.ENOENT)

    @staticmethod
    def _listdir(path):
        return Path._os_func(os.listdir, path, errno.ENOENT, default=[])

    @staticmethod
    def _os_func(func, path, errno, default=None):
        """Call func(path) in an idempotent way.

        On exception ``ex``, if the type is OSError and ``ex.errno == errno``,
        return ``default``, otherwise, re-raise
        """
        try:
            return func(path)
        except OSError as e:
            if e.errno == errno:
                return default
            else:
                raise

    @staticmethod
    def rm_all(path):
        """Same as rm -r."""
        # Check islink first so a symlink to a directory is unlinked, not
        # descended into.
        if os.path.islink(path):
            Path._remove(path)
        elif os.path.isdir(path):
            Path._rmdir_dir(path)
        else:
            Path._remove(path)

    @staticmethod
    def untar(path, into=None):
        """Extract tarball at ``path`` into subdir ``into``.

        return subdir name if and only if there exists one, otherwise raise
        PathException
        """
        args = ('tar', '-C', into, '-xzf', path, '--no-same-permissions')
        subprocess.check_call(args, preexec_fn=lambda: os.umask(0o22))
        dirs = os.listdir(into)
        if len(dirs) == 1:
            return dirs[0]
        else:
            raise PathException('untar %s: expecting a single subdir, got %s' % (path, dirs))

    @staticmethod
    def tar(path, subdir, into=None, ts=None):
        """Pack ``path`` into tarball ``into``."""
        # --sort=name requires a recent build of GNU tar
        args = ['tar', '--numeric-owner', '--owner=0', '--group=0',
                '--sort=name', '--mode=a-s']
        args += ['-C', path, '-cf', into, subdir]
        envs = os.environ.copy()
        if ts is not None:
            args.append('--mtime=@%d' % ts)
        if into.endswith('.xz'):
            envs['XZ_OPT'] = '-7e'
            args.append('-J')
        elif into.endswith('.bz2'):
            args.append('-j')
        elif into.endswith('.gz'):
            args.append('-z')
            # -n: omit timestamp from gzip header for reproducible output
            envs['GZIP'] = '-n'
        else:
            raise PathException('unknown compression type %s' % into)
        subprocess.check_call(args, env=envs)


class GitHubCommitTsCache(object):
    """File-backed cache of commit timestamps, guarded by fcntl locks."""

    __cachef = 'github.commit.ts.cache'
    # keep at most this many entries, newest-updated last
    __cachen = 2048

    def __init__(self):
        Path.mkdir_all(TMPDIR_DL)
        self.cachef = os.path.join(TMPDIR_DL, self.__cachef)
        self.cache = {}

    def get(self, k):
        """Get timestamp with key ``k``."""
        fileno = os.open(self.cachef, os.O_RDONLY | os.O_CREAT)
        with os.fdopen(fileno) as fin:
            try:
                fcntl.lockf(fileno, fcntl.LOCK_SH)
                self._cache_init(fin)
                if k in self.cache:
                    ts = self.cache[k][0]
                    return ts
            finally:
                fcntl.lockf(fileno, fcntl.LOCK_UN)
        return None

    def set(self, k, v):
        """Update timestamp with ``k``."""
        fileno = os.open(self.cachef, os.O_RDWR | os.O_CREAT)
        with os.fdopen(fileno, 'w+') as f:
            try:
                fcntl.lockf(fileno, fcntl.LOCK_EX)
                self._cache_init(f)
                self.cache[k] = (v, int(time.time()))
                self._cache_flush(f)
            finally:
                fcntl.lockf(fileno, fcntl.LOCK_UN)

    def _cache_init(self, fin):
        # Each line: "<key> <timestamp> <last-updated>"
        for line in fin:
            k, ts, updated = line.split()
            ts = int(ts)
            updated = int(updated)
            self.cache[k] = (ts, updated)

    def _cache_flush(self, fout):
        # Keep the oldest-updated entries first, truncate to __cachen, then
        # rewrite the whole file in place.
        cache = sorted(self.cache.items(), key=lambda a: a[1][1])
        cache = cache[:self.__cachen]
        self.cache = {}
        os.ftruncate(fout.fileno(), 0)
        fout.seek(0, os.SEEK_SET)
        for k, ent in cache:
            ts = ent[0]
            updated = ent[1]
            line = '{0} {1} {2}\n'.format(k, ts, updated)
            fout.write(line)


class DownloadGitHubTarball(object):
    """Download and repack archive tarball from GitHub.

    Compared with the method of packing after cloning the whole repo, this
    method is more friendly to users with fragile internet connection.

    However, there are limitations with this method

     - GitHub imposes a 60 reqs/hour limit for unauthenticated API access.
       This affects fetching commit date for reproducible tarballs.  Download
       through the archive link is not affected.

     - GitHub archives do not contain source codes for submodules.

     - GitHub archives seem to respect .gitattributes and ignore paths with
       export-ignore attributes.

    For the first two issues, the method will fail loudly to allow fallback
    to clone-then-pack method.

    As for the 3rd issue, to make sure that this method only produces
    identical tarballs as the fallback method, we require the expected hash
    value to be supplied.  That means the first tarball will need to be
    prepared by the clone-then-pack method
    """

    # FIX: the named groups were lost in a bad copy ("(?P[^/]+)" is invalid
    # regex syntax); _init_owner_repo() reads m.group('owner'/'repo').
    __repo_url_regex = re.compile(r'^(?:https|git)://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)')

    def __init__(self, args):
        self.dl_dir = args.dl_dir
        self.version = args.version
        self.subdir = args.subdir
        self.source = args.source
        self.submodules = args.submodules
        self.url = args.url
        self._init_owner_repo()
        self.xhash = args.hash
        self._init_hasher()
        self.commit_ts = None  # lazy load commit timestamp
        self.commit_ts_cache = GitHubCommitTsCache()
        self.name = 'github-tarball'

    def download(self):
        """Download and repack GitHub archive tarball."""
        if self.submodules and self.submodules != ['skip']:
            raise self._error('Fetching submodules is not yet supported')
        self._init_commit_ts()
        with Path(TMPDIR_DL, keep=True) as dir_dl:
            # fetch tarball from GitHub
            tarball_path = os.path.join(dir_dl.path, self.subdir + '.tar.gz.dl')
            with Path(tarball_path, isdir=False):
                self._fetch(tarball_path)
            # unpack
            d = os.path.join(dir_dl.path, self.subdir + '.untar')
            with Path(d, preclean=True) as dir_untar:
                tarball_prefix = Path.untar(tarball_path, into=dir_untar.path)
                dir0 = os.path.join(dir_untar.path, tarball_prefix)
                dir1 = os.path.join(dir_untar.path, self.subdir)
                # submodules check
                if self.submodules != ['skip'] and self._has_submodule(dir0):
                    raise self._error('Fetching submodules is not yet supported')
                # rename subdir
                os.rename(dir0, dir1)
                # repack
                into = os.path.join(TMPDIR_DL, self.source)
                Path.tar(dir_untar.path, self.subdir, into=into, ts=self.commit_ts)
                try:
                    self._hash_check(into)
                except Exception:
                    Path.rm_all(into)
                    raise
                # move to target location
                file1 = os.path.join(self.dl_dir, self.source)
                if into != file1:
                    shutil.move(into, file1)

    def _has_submodule(self, dir_):
        m = os.path.join(dir_, '.gitmodules')
        try:
            st = os.stat(m)
            return st.st_size > 0
        except OSError as e:
            return e.errno != errno.ENOENT

    def _init_owner_repo(self):
        m = self.__repo_url_regex.search(self.url)
        if m is None:
            raise self._error('Invalid github url: {}'.format(self.url))
        owner = m.group('owner')
        repo = m.group('repo')
        if repo.endswith('.git'):
            repo = repo[:-4]
        self.owner = owner
        self.repo = repo

    def _init_hasher(self):
        # Hash length selects the algorithm: 64 hex chars -> sha256,
        # 32 hex chars -> md5.
        xhash = self.xhash
        if len(xhash) == 64:
            self.hasher = hashlib.sha256()
        elif len(xhash) == 32:
            self.hasher = hashlib.md5()
        else:
            raise self._error('Requires sha256sum for verification')
        self.xhash = xhash

    def _hash_check(self, f):
        with open(f, 'rb') as fin:
            while True:
                d = fin.read(4096)
                if not d:
                    break
                self.hasher.update(d)
        xhash = self.hasher.hexdigest()
        if xhash != self.xhash:
            raise self._error('Wrong hash (probably caused by .gitattributes), expecting {}, got {}'.format(self.xhash, xhash))

    def _init_commit_ts(self):
        if self.commit_ts is not None:
            return
        # GitHub provides 2 APIs[1,2] for fetching commit data.  API[1] is
        # more terse while API[2] provides more verbose info such as commit
        # diff etc.  That's the main reason why API[1] is preferred: the
        # response size is predictable.
        #
        # However, API[1] only accepts complete commit sha1sum as the
        # parameter while API[2] is more liberal accepting also partial commit
        # id and tags, etc.
        #
        # [1] Get a single commit, Repositories, https://developer.github.com/v3/repos/commits/#get-a-single-commit
        # [2] Git Commits, Git Data, https://developer.github.com/v3/git/commits/#get-a-commit
        apis = [
            {
                'url': self._make_repo_url_path('git', 'commits', self.version),
                'attr_path': ('committer', 'date'),
            },
            {
                'url': self._make_repo_url_path('commits', self.version),
                'attr_path': ('commit', 'committer', 'date'),
            },
        ]
        version_is_sha1sum = len(self.version) == 40
        if not version_is_sha1sum:
            # Only the second API accepts partial ids/tags; try it first.
            apis.insert(0, apis.pop())
        reasons = ''
        for api in apis:
            url = api['url']
            attr_path = api['attr_path']
            try:
                ct = self.commit_ts_cache.get(url)
                if ct is not None:
                    self.commit_ts = ct
                    return
                ct = self._init_commit_ts_remote_get(url, attr_path)
                self.commit_ts = ct
                self.commit_ts_cache.set(url, ct)
                return
            except Exception as e:
                reasons += '\n' + ("    {}: {}".format(url, e))
        raise self._error('Cannot fetch commit ts:{}'.format(reasons))

    def _init_commit_ts_remote_get(self, url, attrpath):
        resp = self._make_request(url)
        data = resp.read()
        date = json.loads(data)
        # Walk down the JSON object to the ISO-8601 date string.
        for attr in attrpath:
            date = date[attr]
        date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
        date = date.timetuple()
        ct = calendar.timegm(date)
        return ct

    def _fetch(self, path):
        """Fetch tarball of the specified version ref."""
        ref = self.version
        url = self._make_repo_url_path('tarball', ref)
        resp = self._make_request(url)
        with open(path, 'wb') as fout:
            while True:
                d = resp.read(4096)
                if not d:
                    break
                fout.write(d)

    def _make_repo_url_path(self, *args):
        url = '/repos/{0}/{1}'.format(self.owner, self.repo)
        if args:
            url += '/' + '/'.join(args)
        return url

    def _make_request(self, path):
        """Request GitHub API endpoint on ``path``."""
        url = 'https://api.github.com' + path
        headers = {
            'Accept': 'application/vnd.github.v3+json',
            'User-Agent': 'OpenWrt',
        }
        req = urllib.request.Request(url, headers=headers)
        # NOTE(review): certificate verification is deliberately disabled
        # here; integrity is enforced by the expected-hash check instead.
        sslcontext = ssl._create_unverified_context()
        fileobj = urllib.request.urlopen(req, context=sslcontext)
        return fileobj

    def _error(self, msg):
        return DownloadGitHubError('{}: {}'.format(self.source, msg))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dl-dir', default=os.getcwd(), help='Download dir')
    parser.add_argument('--url', help='Download URL')
    parser.add_argument('--subdir', help='Source code subdir name')
    parser.add_argument('--version', help='Source code version')
    parser.add_argument('--source', help='Source tarball filename')
    parser.add_argument('--hash', help='Source tarball\'s expected sha256sum')
    parser.add_argument('--submodules', nargs='*', help='List of submodules, or "skip"')
    args = parser.parse_args()
    try:
        method = DownloadGitHubTarball(args)
        method.download()
    except Exception as ex:
        sys.stderr.write('{}: Download from {} failed\n'.format(args.source, args.url))
        sys.stderr.write('{}\n'.format(ex))
        sys.exit(1)


if __name__ == '__main__':
    main()
/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2012  Clifford Wolf <clifford@clifford.at>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted, provided that the above
 *  copyright notice and this permission notice appear in all copies.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "kernel/register.h"
#include "kernel/celltypes.h"
#include "kernel/consteval.h"
#include "kernel/sigtools.h"
#include "kernel/log.h"
#include "kernel/satgen.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>

USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN

// Pass-wide state shared by the worker structs below.  Presumably these are
// initialized by the pass's execute() entry point (not visible in this
// chunk) — confirm before relying on defaults.
bool inv_mode;
int verbose_level, reduce_counter, reduce_stop_at;
// Maps a driven bit to the cell driving it plus the set of that cell's
// input bits (its immediate fan-in), as used by the cone walkers below.
typedef std::map<RTLIL::SigBit, std::pair<RTLIL::Cell*, std::set<RTLIL::SigBit>>> drivers_t;
std::string dump_prefix;

// One candidate member of an equivalence class: a signal bit together with
// its driving cell, logic depth, and polarity relative to the class.
struct equiv_bit_t
{
	int depth;
	bool inverted;
	RTLIL::Cell *drv;
	RTLIL::SigBit bit;

	// Strict lexicographic order on (depth, inverted, drv, bit), so sorting
	// puts the shallowest, non-inverted representative first.
	bool operator<(const equiv_bit_t &other) const {
		if (depth < other.depth) return true;
		if (other.depth < depth) return false;
		if (inverted < other.inverted) return true;
		if (other.inverted < inverted) return false;
		if (drv < other.drv) return true;
		if (other.drv < drv) return false;
		return bit < other.bit;
	}
};

// Functor that tallies, per canonical signal bit, how often it appears in
// the signal specs it is applied to.
struct CountBitUsage
{
	SigMap &sigmap;
	std::map<RTLIL::SigBit, int> &cache;

	CountBitUsage(SigMap &sigmap, std::map<RTLIL::SigBit, int> &cache) : sigmap(sigmap), cache(cache) { }

	// Normalize the spec through sigmap, then bump the counter of every bit.
	void operator()(RTLIL::SigSpec &sig) {
		for (auto &mapped_bit : sigmap(sig).to_sigbit_vector())
			cache[mapped_bit]++;
	}
};

// Builds a SAT instance with two copies ("A" and "B") of an output's input
// cone and uses it to find the subset of primary inputs the output actually
// depends on: an input is relevant iff flipping only that input (while both
// copies stay defined) can make the two output copies differ.
struct FindReducedInputs
{
	SigMap &sigmap;
	drivers_t &drivers;

	ezSatPtr ez;
	// Cells already imported into the SAT instance (in both contexts).
	std::set<RTLIL::Cell*> ez_cells;
	SatGen satgen;

	// Per primary input: SAT literal that is true iff copies A and B of
	// that input differ.
	std::map<RTLIL::SigBit, int> sat_pi;
	// Binary-encoded "which single PI differs" constraint bits; together
	// with the per-PI clauses below this forces at most one PI to differ.
	std::vector<int> sat_pi_uniq_bitvec;

	FindReducedInputs(SigMap &sigmap, drivers_t &drivers) :
			sigmap(sigmap), drivers(drivers), satgen(ez.get(), &sigmap)
	{
		// Track undef (x) values so "defined" can be asserted explicitly.
		satgen.model_undef = true;
	}

	// Number of significant bits needed to represent val (0 -> 0 bits).
	int get_bits(int val)
	{
		int bits = 0;
		for (int i = 8*sizeof(int); val; i = i >> 1)
			if (val >> (i-1)) {
				bits += i;
				val = val >> i;
			}
		return bits;
	}

	// Import primary-input bit into both SAT contexts, assert it is defined
	// in each, and wire up its "A differs from B" literal together with the
	// binary-index clauses that allow at most one PI to differ at a time.
	void register_pi_bit(RTLIL::SigBit bit)
	{
		if (sat_pi.count(bit) != 0)
			return;

		satgen.setContext(&sigmap, "A");
		int sat_a = satgen.importSigSpec(bit).front();
		ez->assume(ez->NOT(satgen.importUndefSigSpec(bit).front()));

		satgen.setContext(&sigmap, "B");
		int sat_b = satgen.importSigSpec(bit).front();
		ez->assume(ez->NOT(satgen.importUndefSigSpec(bit).front()));

		int idx = sat_pi.size();
		size_t idx_bits = get_bits(idx);

		// Grow the index bit-vector when idx crosses a power of two; all
		// previously registered PIs get a 0 in the new position.
		if (sat_pi_uniq_bitvec.size() != idx_bits) {
			sat_pi_uniq_bitvec.push_back(ez->frozen_literal(stringf("uniq_%d", int(idx_bits)-1)));
			for (auto &it : sat_pi)
				ez->assume(ez->OR(ez->NOT(it.second), ez->NOT(sat_pi_uniq_bitvec.back())));
		}
		log_assert(sat_pi_uniq_bitvec.size() == idx_bits);

		// FIX: the literal name was garbled to "p, falsei_%s" in this copy;
		// it is only a debug name for the frozen literal, restore "pi_%s".
		sat_pi[bit] = ez->frozen_literal(stringf("pi_%s", log_signal(bit)));
		ez->assume(ez->IFF(ez->XOR(sat_a, sat_b), sat_pi[bit]));

		// Bind this PI's "differs" literal to the binary encoding of idx,
		// so two distinct PIs can never both differ in one model.
		for (size_t i = 0; i < idx_bits; i++)
			if ((idx & (1 << i)) == 0)
				ez->assume(ez->OR(ez->NOT(sat_pi[bit]), ez->NOT(sat_pi_uniq_bitvec[i])));
			else
				ez->assume(ez->OR(ez->NOT(sat_pi[bit]), sat_pi_uniq_bitvec[i]));
	}

	// Walk the driver tree behind ``out``, importing every driving cell into
	// both SAT contexts; undriven bits are registered as primary inputs.
	void register_cone_worker(std::set<RTLIL::SigBit> &pi, std::set<RTLIL::SigBit> &sigdone, RTLIL::SigBit out)
	{
		if (out.wire == NULL)
			return;
		if (sigdone.count(out) != 0)
			return;
		sigdone.insert(out);

		if (drivers.count(out) != 0) {
			std::pair<RTLIL::Cell*, std::set<RTLIL::SigBit>> &drv = drivers.at(out);
			if (ez_cells.count(drv.first) == 0) {
				satgen.setContext(&sigmap, "A");
				if (!satgen.importCell(drv.first))
					log_error("Can't create SAT model for cell %s (%s)!\n", RTLIL::id2cstr(drv.first->name), RTLIL::id2cstr(drv.first->type));
				satgen.setContext(&sigmap, "B");
				if (!satgen.importCell(drv.first))
					log_abort();
				ez_cells.insert(drv.first);
			}
			for (auto &bit : drv.second)
				register_cone_worker(pi, sigdone, bit);
		} else {
			register_pi_bit(out);
			pi.insert(out);
		}
	}

	// Collect the primary inputs of ``out``'s cone into ``pi`` (sorted,
	// since they come out of a std::set).
	void register_cone(std::vector<RTLIL::SigBit> &pi, RTLIL::SigBit out)
	{
		std::set<RTLIL::SigBit> pi_set, sigdone;
		register_cone_worker(pi_set, sigdone, out);
		pi.clear();
		pi.insert(pi.end(), pi_set.begin(), pi_set.end());
	}

	// Determine which primary inputs ``output`` really depends on and append
	// them to ``reduced_inputs``.  ``prec`` is only used for progress logs.
	void analyze(std::vector<RTLIL::SigBit> &reduced_inputs, RTLIL::SigBit output, int prec)
	{
		if (verbose_level >= 1)
			log("[%2d%%]  Analyzing input cone for signal %s:\n", prec, log_signal(output));

		std::vector<RTLIL::SigBit> pi;
		register_cone(pi, output);

		if (verbose_level >= 1)
			log("         Found %d input signals and %d cells.\n", int(pi.size()), int(ez_cells.size()));

		satgen.setContext(&sigmap, "A");
		int output_a = satgen.importSigSpec(output).front();
		int output_undef_a = satgen.importUndefSigSpec(output).front();

		satgen.setContext(&sigmap, "B");
		int output_b = satgen.importSigSpec(output).front();
		int output_undef_b = satgen.importUndefSigSpec(output).front();

		// Start by assuming every PI is irrelevant, then remove PIs that a
		// SAT model proves relevant until no further witness exists.
		std::set<int> unused_pi_idx;

		for (size_t i = 0; i < pi.size(); i++)
			unused_pi_idx.insert(i);

		while (1)
		{
			std::vector<int> model_pi_idx;
			std::vector<int> model_expr;
			std::vector<bool> model;

			for (size_t i = 0; i < pi.size(); i++)
				if (unused_pi_idx.count(i) != 0) {
					model_pi_idx.push_back(i);
					model_expr.push_back(sat_pi.at(pi[i]));
				}

			// Ask for a model where some still-"unused" PI differs between
			// A and B, yet the (defined) outputs differ as well.
			if (!ez->solve(model_expr, model, ez->expression(ezSAT::OpOr, model_expr), ez->XOR(output_a, output_b), ez->NOT(output_undef_a), ez->NOT(output_undef_b)))
				break;

			int found_count = 0;
			for (size_t i = 0; i < model_pi_idx.size(); i++)
				if (model[i]) {
					if (verbose_level >= 2)
						log("         Found relevant input: %s\n", log_signal(pi[model_pi_idx[i]]));
					unused_pi_idx.erase(model_pi_idx[i]);
					found_count++;
				}
			// The uniqueness encoding guarantees exactly one PI differs.
			log_assert(found_count == 1);
		}

		for (size_t i = 0; i < pi.size(); i++)
			if (unused_pi_idx.count(i) == 0)
				reduced_inputs.push_back(pi[i]);

		if (verbose_level >= 1)
			log("         Reduced input cone contains %d inputs.\n", int(reduced_inputs.size()));
	}
};

// Builds one SAT instance over the combined input cones of a set of output
// bits and shatters those bits into equivalence classes (optionally modulo
// inversion, see global inv_mode) by repeated SAT solving.
struct PerformReduction
{
	SigMap &sigmap;
	drivers_t &drivers;
	// Pairs already proven equivalent-with-inversion elsewhere; used to
	// filter duplicate results in analyze() below.
	std::set<std::pair<RTLIL::SigBit, RTLIL::SigBit>> &inv_pairs;
	pool<SigBit> recursion_guard;

	ezSatPtr ez;
	SatGen satgen;

	// Parallel vectors indexed by output position: SAT literal of the
	// (possibly inverted) output, and literal asserting it is defined.
	std::vector<int> sat_pi, sat_out, sat_def;
	std::vector<RTLIL::SigBit> out_bits, pi_bits;
	std::vector<bool> out_inverted;
	std::vector<int> out_depth;
	int cone_size;

	// Import the full driver cone behind ``out`` into the SAT instance and
	// return its logic depth (0 for primary inputs).  Aborts with a logged
	// error on combinational loops.
	int register_cone_worker(std::set<RTLIL::Cell*> &celldone, std::map<RTLIL::SigBit, int> &sigdepth, RTLIL::SigBit out)
	{
		if (out.wire == NULL)
			return 0;
		if (sigdepth.count(out) != 0)
			return sigdepth.at(out);

		if (recursion_guard.count(out)) {
			string loop_signals;
			for (auto loop_bit : recursion_guard)
				loop_signals += string(" ") + log_signal(loop_bit);
			log_error("Found logic loop:%s\n", loop_signals.c_str());
		}

		recursion_guard.insert(out);

		if (drivers.count(out) != 0) {
			std::pair<RTLIL::Cell*, std::set<RTLIL::SigBit>> &drv = drivers.at(out);
			if (celldone.count(drv.first) == 0) {
				if (!satgen.importCell(drv.first))
					log_error("Can't create SAT model for cell %s (%s)!\n", RTLIL::id2cstr(drv.first->name), RTLIL::id2cstr(drv.first->type));
				celldone.insert(drv.first);
			}
			int max_child_depth = 0;
			for (auto &bit : drv.second)
				max_child_depth = max(register_cone_worker(celldone, sigdepth, bit), max_child_depth);
			sigdepth[out] = max_child_depth + 1;
		} else {
			// Undriven bit: treat as primary input, assert it is defined.
			pi_bits.push_back(out);
			sat_pi.push_back(satgen.importSigSpec(out).front());
			ez->assume(ez->NOT(satgen.importUndefSigSpec(out).front()));
			sigdepth[out] = 0;
		}

		recursion_guard.erase(out);
		return sigdepth.at(out);
	}

	// Import the cones of all ``bits``.  In inv_mode an initial model is
	// solved and outputs that are 1 in it are stored inverted, so that
	// equivalence-up-to-inversion becomes plain equivalence on sat_out.
	PerformReduction(SigMap &sigmap, drivers_t &drivers, std::set<std::pair<RTLIL::SigBit, RTLIL::SigBit>> &inv_pairs, std::vector<RTLIL::SigBit> &bits, int cone_size) :
			sigmap(sigmap), drivers(drivers), inv_pairs(inv_pairs), satgen(ez.get(), &sigmap), out_bits(bits), cone_size(cone_size)
	{
		satgen.model_undef = true;

		std::set<RTLIL::Cell*> celldone;
		std::map<RTLIL::SigBit, int> sigdepth;

		for (auto &bit : bits) {
			out_depth.push_back(register_cone_worker(celldone, sigdepth, bit));
			sat_out.push_back(satgen.importSigSpec(bit).front());
			sat_def.push_back(ez->NOT(satgen.importUndefSigSpec(bit).front()));
		}

		if (inv_mode && cone_size > 0) {
			if (!ez->solve(sat_out, out_inverted, ez->expression(ezSAT::OpAnd, sat_def)))
				log_error("Solving for initial model failed!\n");
			for (size_t i = 0; i < sat_out.size(); i++)
				if (out_inverted.at(i))
					sat_out[i] = ez->NOT(sat_out[i]);
		} else
			out_inverted = std::vector<bool>(sat_out.size(), false);
	}

	// Special case for zero-size cones: decide whether out_bits[idx] is
	// constant 0, constant 1, or x, and merge it into the matching result
	// group (creating the group, headed by the constant bit, if needed).
	void analyze_const(std::vector<std::vector<equiv_bit_t>> &results, int idx)
	{
		if (verbose_level == 1)
			log("    Finding const value for %s.\n", log_signal(out_bits[idx]));

		bool can_be_set = ez->solve(ez->AND(sat_out[idx], sat_def[idx]));
		bool can_be_clr = ez->solve(ez->AND(ez->NOT(sat_out[idx]), sat_def[idx]));
		// Caller presumably only uses this for signals that cannot take both
		// values — confirm against the (not visible) call site.
		log_assert(!can_be_set || !can_be_clr);

		RTLIL::SigBit value(RTLIL::State::Sx);
		if (can_be_set)
			value = RTLIL::State::S1;
		if (can_be_clr)
			value = RTLIL::State::S0;
		if (verbose_level == 1)
			log("      Constant value for this signal: %s\n", log_signal(value));

		int result_idx = -1;
		for (size_t i = 0; i < results.size(); i++) {
			if (results[i].front().bit == value) {
				result_idx = i;
				break;
			}
		}

		if (result_idx == -1) {
			result_idx = results.size();
			results.push_back(std::vector<equiv_bit_t>());
			equiv_bit_t bit;
			bit.depth = 0;
			bit.inverted = false;
			bit.drv = NULL;
			bit.bit = value;
			results.back().push_back(bit);
		}

		equiv_bit_t bit;
		bit.depth = 1;
		bit.inverted = false;
		bit.drv = drivers.count(out_bits[idx]) ? drivers.at(out_bits[idx]).first : NULL;
		bit.bit = out_bits[idx];
		results[result_idx].push_back(bit);
	}

	// Recursively shatter ``bucket`` (indices into sat_out) into groups of
	// provably-equal outputs.  Each SAT model that makes some defined
	// outputs 0 and others 1 splits the bucket in two; UNSAT means the
	// remaining bucket is one equivalence group.
	void analyze(std::vector<std::set<int>> &results, std::map<int, int> &results_map, std::vector<int> &bucket, std::string indent1, std::string indent2)
	{
		std::string indent = indent1 + indent2;
		const char *indt = indent.c_str();

		if (bucket.size() <= 1)
			return;

		if (verbose_level == 1)
			log("%s  Trying to shatter bucket with %d signals.\n", indt, int(bucket.size()));

		if (verbose_level > 1) {
			std::vector<RTLIL::SigBit> bucket_sigbits;
			for (int idx : bucket)
				bucket_sigbits.push_back(out_bits[idx]);
			log("%s  Trying to shatter bucket with %d signals: %s\n", indt, int(bucket.size()), log_signal(bucket_sigbits));
		}

		std::vector<int> sat_set_list, sat_clr_list;
		for (int idx : bucket) {
			sat_set_list.push_back(ez->AND(sat_out[idx], sat_def[idx]));
			sat_clr_list.push_back(ez->AND(ez->NOT(sat_out[idx]), sat_def[idx]));
		}

		// Model layout: [0, n) output values, [n, 2n) defined flags, and
		// (at verbosity >= 2) [2n, ...) primary-input values.
		std::vector<int> modelVars = sat_out;
		std::vector<bool> model;

		modelVars.insert(modelVars.end(), sat_def.begin(), sat_def.end());
		if (verbose_level >= 2)
			modelVars.insert(modelVars.end(), sat_pi.begin(), sat_pi.end());

		if (ez->solve(modelVars, model, ez->expression(ezSAT::OpOr, sat_set_list), ez->expression(ezSAT::OpOr, sat_clr_list)))
		{
			int iter_count = 1;

			// Greedily extend the splitting model: keep re-solving while
			// forcing already-defined outputs to stay defined, so as many
			// undef outputs as possible get pinned to a side.
			while (1)
			{
				sat_set_list.clear();
				sat_clr_list.clear();

				std::vector<int> sat_def_list;

				for (int idx : bucket)
					if (!model[sat_out.size() + idx]) {
						sat_set_list.push_back(ez->AND(sat_out[idx], sat_def[idx]));
						sat_clr_list.push_back(ez->AND(ez->NOT(sat_out[idx]), sat_def[idx]));
					} else {
						sat_def_list.push_back(sat_def[idx]);
					}

				if (!ez->solve(modelVars, model, ez->expression(ezSAT::OpOr, sat_set_list), ez->expression(ezSAT::OpOr, sat_clr_list), ez->expression(ezSAT::OpAnd, sat_def_list)))
					break;
				iter_count++;
			}

			if (verbose_level >= 1) {
				int count_set = 0, count_clr = 0, count_undef = 0;
				for (int idx : bucket)
					if (!model[sat_out.size() + idx])
						count_undef++;
					else if (model[idx])
						count_set++;
					else
						count_clr++;
				log("%s    After %d iterations: %d set vs. %d clr vs %d undef\n", indt, iter_count, count_set, count_clr, count_undef);
			}

			if (verbose_level >= 2) {
				for (size_t i = 0; i < pi_bits.size(); i++)
					log("%s       -> PI  %c == %s\n", indt, model[2*sat_out.size() + i] ? '1' : '0', log_signal(pi_bits[i]));
				for (int idx : bucket)
					log("%s       -> OUT %c == %s%s\n", indt, model[sat_out.size() + idx] ? model[idx] ? '1' : '0' : 'x',
							out_inverted.at(idx) ? "~" : "", log_signal(out_bits[idx]));
			}

			// Undef outputs go into both halves, since the model does not
			// distinguish them from either side.
			std::vector<int> buckets_a;
			std::vector<int> buckets_b;

			for (int idx : bucket) {
				if (!model[sat_out.size() + idx] || model[idx])
					buckets_a.push_back(idx);
				if (!model[sat_out.size() + idx] || !model[idx])
					buckets_b.push_back(idx);
			}
			analyze(results, results_map, buckets_a, indent1 + ".", indent2 + "  ");
			analyze(results, results_map, buckets_b, indent1 + "x", indent2 + "  ");
		}
		else
		{
			// UNSAT: all defined outputs in this bucket always agree.
			// An output is an "undef slave" if it can be x while some other
			// bucket member is defined.
			std::vector<int> undef_slaves;

			for (int idx : bucket) {
				std::vector<int> sat_def_list;
				for (int idx2 : bucket)
					if (idx != idx2)
						sat_def_list.push_back(sat_def[idx2]);
				if (ez->solve(ez->NOT(sat_def[idx]), ez->expression(ezSAT::OpOr, sat_def_list)))
					undef_slaves.push_back(idx);
			}

			if (undef_slaves.size() == bucket.size()) {
				if (verbose_level >= 1)
					log("%s    Complex undef overlap. None of the signals covers the others.\n", indt);
				// FIXME: We could try to further shatter a group with complex undef overlaps
				return;
			}

			// Demote undef slaves so they never become the group's
			// representative after sorting by depth.
			for (int idx : undef_slaves)
				out_depth[idx] = std::numeric_limits<int>::max();

			if (verbose_level >= 1) {
				log("%s    Found %d equivalent signals:", indt, int(bucket.size()));
				for (int idx : bucket)
					log("%s%s%s", idx == bucket.front() ? " " : ", ", out_inverted[idx] ? "~" : "", log_signal(out_bits[idx]));
				log("\n");
			}

			// Merge with any result groups that already contain members of
			// this bucket, collapsing them into a single group.
			int result_idx = -1;
			for (int idx : bucket) {
				if (results_map.count(idx) == 0)
					continue;
				if (result_idx == -1) {
					result_idx = results_map.at(idx);
					continue;
				}
				int result_idx2 = results_map.at(idx);
				results[result_idx].insert(results[result_idx2].begin(), results[result_idx2].end());
				for (int idx2 : results[result_idx2])
					results_map[idx2] = result_idx;
				results[result_idx2].clear();
			}

			if (result_idx == -1) {
				result_idx = results.size();
				results.push_back(std::set<int>());
			}

			results[result_idx].insert(bucket.begin(), bucket.end());
		}
	}

	// Entry point: shatter all outputs into groups, then convert each group
	// of size > 1 into a sorted vector of equiv_bit_t, filtering pairs
	// already recorded in inv_pairs.
	void analyze(std::vector<std::vector<equiv_bit_t>> &results, int perc)
	{
		std::vector<int> bucket;
		for (size_t i = 0; i < sat_out.size(); i++)
			bucket.push_back(i);

		std::vector<std::set<int>> results_buf;
		std::map<int, int> results_map;
		analyze(results_buf, results_map, bucket, stringf("[%2d%%] %d ", perc, cone_size), "");

		for (auto &r : results_buf)
		{
			if (r.size() <= 1)
				continue;

			if (verbose_level >= 1) {
				std::vector<RTLIL::SigBit> r_sigbits;
				for (int idx : r)
					r_sigbits.push_back(out_bits[idx]);
				log("  Found group of %d equivalent signals: %s\n", int(r.size()), log_signal(r_sigbits));
			}

			std::vector<int> undef_slaves;

			for (int idx : r) {
				std::vector<int> sat_def_list;
				for (int idx2 : r)
					if (idx != idx2)
						sat_def_list.push_back(sat_def[idx2]);
				if (ez->solve(ez->NOT(sat_def[idx]), ez->expression(ezSAT::OpOr, sat_def_list)))
					undef_slaves.push_back(idx);
			}

			// NOTE(review): this compares against bucket.size() (all
			// outputs) rather than r.size(), and returns (skipping the
			// remaining groups) rather than continuing — confirm intended.
			if (undef_slaves.size() == bucket.size()) {
				if (verbose_level >= 1)
					log("    Complex undef overlap. None of the signals covers the others.\n");
				// FIXME: We could try to further shatter a group with complex undef overlaps
				return;
			}

			for (int idx : undef_slaves)
				out_depth[idx] = std::numeric_limits<int>::max();

			std::vector<equiv_bit_t> result;

			for (int idx : r) {
				equiv_bit_t bit;
				bit.depth = out_depth[idx];
				bit.inverted = out_inverted[idx];
				bit.drv = drivers.count(out_bits[idx]) ? drivers.at(out_bits[idx]).first : NULL;
				bit.bit = out_bits[idx];
				result.push_back(bit);
			}

			std::sort(result.begin(), result.end());

			// Normalize polarity so the group representative (front) is
			// reported non-inverted.
			if (result.front().inverted)
				for (auto &bit : result)
					bit.inverted = !bit.inverted;

			// Drop members whose pairing with the representative was
			// already recorded as an inverted pair.
			for (size_t i = 1; i < result.size(); i++) {
				std::pair<RTLIL::SigBit, RTLIL::SigBit> p(result[0].bit, result[i].bit);
				if (inv_pairs.count(p) != 0)
					result.erase(result.begin() + i);
			}

			if (result.size() > 1)
				results.push_back(result);
		}
	}
};

struct FreduceWorker
{
	RTLIL::Design *design;
	RTLIL::Module *module;

	SigMap sigmap;
	drivers_t drivers;
	std::set<std::pair<RTLIL::SigBit, RTLIL::SigBit>> inv_pairs;

	FreduceWorker(RTLIL::Design *design, RTLIL::Module *module) : design(design), module(module), sigmap(module)
	{
	}

	bool find_bit_in_cone(std::set<RTLIL::Cell*> &celldone, RTLIL::SigBit needle, RTLIL::SigBit haystack)
	{
		if (needle == haystack)
			return true;
		if (haystack.wire == NULL || needle.wire == NULL || drivers.count(haystack) == 0)
			return false;

		std::pair<RTLIL::Cell*, std::set<RTLIL::SigBit>> &drv = drivers.at(haystack);

		if (celldone.count(drv.first))
			return false;
		celldone.insert(drv.first);