summaryrefslogtreecommitdiffstats
path: root/src/python
diff options
context:
space:
mode:
authorBaruch Sterin <baruchs@gmail.com>2011-01-13 22:12:37 +0200
committerBaruch Sterin <baruchs@gmail.com>2011-01-13 22:12:37 +0200
commit811f5631a812968ccdbe157549f2445747053d50 (patch)
treed86691b0af7681aba69935875e5a7d779e24bc36 /src/python
parentc85a763444f696e0125445b5adc675020d736c5d (diff)
downloadabc-811f5631a812968ccdbe157549f2445747053d50.tar.gz
abc-811f5631a812968ccdbe157549f2445747053d50.tar.bz2
abc-811f5631a812968ccdbe157549f2445747053d50.zip
pyabc: reorganize supporting python scripts
Diffstat (limited to 'src/python')
-rw-r--r--src/python/getch.py37
-rw-r--r--src/python/pyabc_split.py345
-rw-r--r--src/python/redirect.py94
-rw-r--r--src/python/setup.py2
4 files changed, 477 insertions, 1 deletions
diff --git a/src/python/getch.py b/src/python/getch.py
new file mode 100644
index 00000000..89e13078
--- /dev/null
+++ b/src/python/getch.py
@@ -0,0 +1,37 @@
+
class _Getch:
    """Read a single character from standard input without echoing it.

    Chooses the Windows backend when msvcrt can be imported, and falls
    back to the Unix (termios) backend otherwise.
    """

    def __init__(self):
        try:
            backend = _GetchWindows()
        except ImportError:
            backend = _GetchUnix()
        self.impl = backend

    def __call__(self):
        return self.impl()
+
+
+class _GetchUnix:
+ def __init__(self):
+ import tty, sys
+
+ def __call__(self):
+ import sys, tty, termios
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
+
+class _GetchWindows:
+ def __init__(self):
+ import msvcrt
+
+ def __call__(self):
+ import msvcrt
+ return msvcrt.getch()
+
getch = _Getch()  # module-level singleton: call getch() to read one keystroke
diff --git a/src/python/pyabc_split.py b/src/python/pyabc_split.py
new file mode 100644
index 00000000..b52288c2
--- /dev/null
+++ b/src/python/pyabc_split.py
@@ -0,0 +1,345 @@
+"""
+module pyabc_split
+
+Executes python functions and their arguments as separate processes and returns their return values through pickling. This module offers a single function:
+
+Function: split_all(funcs)
+
+The function returns a generator object that allows iteration over the results.
+
+Arguments:
+
+funcs: a list of tuples (f, args) where f is a python function and args is a collection of arguments for f.
+
+Caveats:
+
+1. Global variables in the parent process are not affected by the child processes.
+2. The functions can only return simple types, see the pickle module for details
+3. Signals are currently not handled correctly
+
+Usage:
+
+Assume you would like to run the function f_1(1), f_2(1,2), f_3(1,2,3) in different processes.
+
+def f_1(i):
+ return i+1
+
+def f_2(i,j):
+ return i*10+j+1
+
+def f_3(i,j,k):
+ return i*100+j*10+k+1
+
+Construct a tuple of the function and arguments for each function
+
+t_1 = (f_1, [1])
+t_2 = (f_2, [1,2])
+t_3 = (f_3, [1,2,3])
+
+Create a list containing these tuples:
+
+funcs = [t_1, t_2, t_3]
+
+Use the function split_all() to run these functions in separate processes:
+
+for res in split_all(funcs):
+ print res
+
+The output will be:
+
+2
+13
+124
+
+(The order may be different, except that in this case the processes are so fast that they terminate before the next one is created)
+
+Alternatively, you may quit in the middle, say after the first process returns:
+
+for res in split_all(funcs):
+ print res
+ break
+
+This will kill all processes not yet finished.
+
+To run ABC operations that require saving the child process state and restoring it at the parent, use abc_split_all().
+
+ import pyabc
+
+ def abc_f(truth):
+ import os
+ print "pid=%d, abc_f(%s)"%(os.getpid(), truth)
+ pyabc.run_command('read_truth %s'%truth)
+ pyabc.run_command('strash')
+
+ funcs = [
+ defer(abc_f)("1000"),
+ defer(abc_f)("0001")
+ ]
+
+ for _ in abc_split_all(funcs):
+ pyabc.run_command('write_verilog /dev/stdout')
+
+Author: Baruch Sterin <sterin@berkeley.edu>
+"""
+
+import os
+import sys
+import pickle
+import signal
+from contextlib import contextmanager
+
+import pyabc
+
class _sigint_critical_section(object):
    """Context manager that blocks SIGINT around a critical section.

    acquire()/release() are idempotent, so the guard may be released
    early inside the 'with' body (as the forked child does in fork_one).
    """

    def __init__(self):
        self.blocked = False

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.release()

    def acquire(self):
        # only transition once; nested acquires are no-ops
        if self.blocked:
            return
        self.blocked = True
        pyabc.block_sigint()

    def release(self):
        if not self.blocked:
            return
        self.blocked = False
        pyabc.restore_sigint_block()
+
class _splitter(object):
    """Fork one child process per function and collect their pickled
    return values through per-child pipes.

    State:
      funcs   -- the callables to run, one child each
      pids    -- child process ids, in fork order
      fds     -- maps pid -> (index into funcs, read end of its pipe)
      results -- maps pid -> result (None for children killed by cleanup)
    """

    def __init__(self, funcs):
        self.funcs = funcs
        self.pids = []
        self.fds = {}
        self.results = {}

    def is_done(self):
        # done when no child still has an open result pipe
        return len(self.fds) == 0

    def cleanup(self):
        """Kill and reap every child whose result was not consumed."""

        # close pipes and kill child processes
        for pid, (i, fd) in self.fds.iteritems():
            os.close(fd)
            os.kill(pid, signal.SIGINT)

        # block SIGINT while reaping so bookkeeping stays consistent
        with _sigint_critical_section() as cs:
            # wait for termination and update result
            for pid, _ in self.fds.iteritems():
                os.waitpid(pid, 0)
                pyabc.remove_child_pid(pid)
                self.results[pid] = None

        self.fds = {}

    def child(self, fdw, f):
        """Run in the forked child: call f and pickle its result back
        through the write end of the pipe. Returns the exit code (0)."""
        res = f()

        # write return value into the pipe
        with os.fdopen(fdw, "w") as fout:
            pickle.dump(res, fout)

        return 0

    def fork_one(self, f):
        """Fork one child to run f; in the parent, return (pid, read fd)."""

        # create a pipe to communicate with the child process
        pr, pw = os.pipe()

        parentpid = os.getpid()
        rc = 1

        try:
            with _sigint_critical_section() as cs:
                # create child process
                pid = os.fork()

                if pid == 0:
                    # child process: unblock SIGINT, run f, and exit
                    # without ever returning into the parent's code
                    cs.release()
                    os.close(pr)
                    rc = self.child(pw, f)
                    os._exit(rc)
                else:
                    # parent process: register the child, keep the read end
                    pyabc.add_child_pid(pid)
                    os.close(pw)
                    return (pid, pr)

        finally:
            # paranoia: if the child somehow reaches this frame, kill it
            # here rather than let it run the parent's code
            if os.getpid() != parentpid:
                os._exit(rc)

    def fork_all(self):
        """Fork a child for every function in self.funcs."""
        for i, f in enumerate(self.funcs):
            pid, fd = self.fork_one(f)
            self.pids.append(pid)
            self.fds[pid] = (i, fd)

    def get_next_result(self):
        """Wait for any child to terminate and return (index, result).

        A child that exited before writing a complete pickle yields
        (index, None).
        """

        # wait for the next child process to terminate
        pid, rc = os.wait()
        assert pid in self.fds

        # retrieve the pipe file descriptor
        i, fd = self.fds[pid]
        del self.fds[pid]

        assert pid not in self.fds

        # read result from the pipe
        with os.fdopen(fd, "r") as fin:
            try:
                return (i, pickle.load(fin))
            # BUG FIX: the original 'except EOFError, pickle.UnpicklingError:'
            # caught only EOFError and rebound pickle.UnpicklingError to the
            # exception instance; a parenthesized tuple catches both.
            except (EOFError, pickle.UnpicklingError):
                return (i, None)
+
@contextmanager
def _splitter_wrapper(funcs):
    """Yield a _splitter over funcs, guaranteeing that any child
    processes the caller did not consume are killed and reaped."""
    splitter = _splitter(funcs)
    try:
        yield splitter
    finally:
        splitter.cleanup()
+
def split_all_full(funcs):
    """Fork every zero-argument callable in funcs and yield
    (index, result) pairs as the children terminate.

    Results arrive in completion order, not input order. Abandoning
    the generator kills any children still running.
    """
    with _splitter_wrapper(funcs) as splitter:
        splitter.fork_all()
        while not splitter.is_done():
            yield splitter.get_next_result()
+
def defer(f):
    """Return a curried form of f: defer(f)(*args, **kwargs) captures
    the arguments now and returns a zero-argument callable that applies
    them to f when invoked."""
    def _bind(*args, **kwargs):
        def _invoke():
            return f(*args, **kwargs)
        return _invoke
    return _bind
+
def split_all(funcs):
    """Run each (f, args) pair from funcs in its own child process and
    yield the bare results in completion order (indices dropped)."""
    deferred = (defer(f)(*args) for f, args in funcs)
    for _, result in split_all_full(deferred):
        yield result
+
+import tempfile
+
@contextmanager
def temp_file_names(suffixes):
    """Create one named temporary file per suffix and yield the list of
    their names; every file that was created is unlinked on exit, even
    if creation stopped partway through."""
    names = []
    try:
        for suffix in suffixes:
            handle = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
            try:
                names.append(handle.name)
            finally:
                handle.close()
        yield names
    finally:
        for name in names:
            os.unlink(name)
+
class abc_state(object):
    """Snapshot of ABC's current network and status, held in two
    temporary files and reloadable later via restore()."""

    @staticmethod
    def _fresh_name(suffix):
        # allocate a named temp file that survives closing, return its path
        handle = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        try:
            return handle.name
        finally:
            handle.close()

    def __init__(self):
        self.aig = self._fresh_name('.aig')
        self.log = self._fresh_name('.log')
        pyabc.run_command(r'write_status %s' % self.log)
        pyabc.run_command(r'write_aiger %s' % self.aig)

    def __del__(self):
        # drop the backing files when the snapshot is garbage collected
        os.unlink(self.aig)
        os.unlink(self.log)

    def restore(self):
        """Reload the saved network and status into ABC."""
        pyabc.run_command(r'read_aiger %s' % self.aig)
        pyabc.run_command(r'read_status %s' % self.log)
+
def abc_split_all(funcs):
    """Run ABC operations in child processes, propagating each child's
    final ABC state (network + status) back into the parent.

    funcs is a list of zero-argument callables (e.g. built with defer).
    Yields (index, result) pairs in completion order; after each yield
    the parent's ABC state matches the state the child finished with.
    """
    import pyabc

    def run_in_child(f, aig, log):
        # child side: run f, then dump the resulting ABC state to disk
        res = f()
        pyabc.run_command(r'write_status %s' % log)
        pyabc.run_command(r'write_aiger %s' % aig)
        return res

    def adopt_in_parent(res, aig, log):
        # parent side: load the state the child left behind
        pyabc.run_command(r'read_aiger %s' % aig)
        pyabc.run_command(r'read_status %s' % log)
        return res

    # two scratch files (.aig, .log) per function, cleaned up on exit
    with temp_file_names(['.aig', '.log'] * len(funcs)) as tmp:

        wrapped = [
            defer(run_in_child)(f, tmp[2 * i], tmp[2 * i + 1])
            for i, f in enumerate(funcs)
        ]

        for i, res in split_all_full(wrapped):
            yield i, adopt_in_parent(res, tmp[2 * i], tmp[2 * i + 1])
+
+if __name__ == "__main__":
+
+ # define some functions to run
+
+ def f_1(i):
+ return i+1
+
+ def f_2(i,j):
+ return i*10+j+1
+
+ def f_3(i,j,k):
+ return i*100+j*10+k+1
+
+ # Construct a tuple of the function and arguments for each function
+
+ t_1 = (f_1, [1])
+ t_2 = (f_2, [1,2])
+ t_3 = (f_3, [1,2,3])
+
+ # Create a list containing these tuples:
+
+ funcs = [t_1, t_2, t_3]
+
+ # Use the function split_all() to run these functions in separate processes:
+
+ for res in split_all(funcs):
+ print res
+
+ # Alternatively, quit after the first process returns:
+
+ for res in split_all(funcs):
+ print res
+ break
+
+ # For operations with ABC that save and restore status
+
+ import pyabc
+
+ def abc_f(truth):
+ import os
+ print "pid=%d, abc_f(%s)"%(os.getpid(), truth)
+ pyabc.run_command('read_truth %s'%truth)
+ pyabc.run_command('strash')
+ return 100
+
+ funcs = [
+ defer(abc_f)("1000"),
+ defer(abc_f)("0001")
+ ]
+
+ best = None
+
+ for i, res in abc_split_all(funcs):
+ print i, res
+ if best is None:\
+ # save state
+ best = abc_state()
+ pyabc.run_command('write_verilog /dev/stdout')
+
+ # if there is a saved state, restore it
+ if best is not None:
+ best.restore()
+ pyabc.run_command('write_verilog /dev/stdout')
diff --git a/src/python/redirect.py b/src/python/redirect.py
new file mode 100644
index 00000000..498fe150
--- /dev/null
+++ b/src/python/redirect.py
@@ -0,0 +1,94 @@
+"""
+
+A simple context manager for redirecting streams in Python.
+The streams are redirected at the C runtime level so that the output of C extensions
+that use stdio will also be redirected.
+
+null_file : a stream representing the null device (e.g. /dev/null on Unix)
+redirect: a context manager for redirecting streams
+
+Author: Baruch Sterin (sterin@berkeley.edu)
+
+"""
+
+import os
+import sys
+
+from contextlib import contextmanager
+
null_file = open( os.devnull, "w" )  # shared write-only sink stream (/dev/null on Unix, NUL on Windows)
+
+@contextmanager
+def _dup( f ):
+ fd = os.dup( f.fileno() )
+ yield fd
+ os.close(fd)
+
@contextmanager
def redirect(dst = null_file, src = sys.stdout):

    """
    Redirect the src stream into dst at the file-descriptor level.

    Example:
        with redirect( open("somefile.txt"), sys.stdout ):
            ... do some stuff ...

    The original descriptor is restored on exit, even if the managed
    body raises.
    """

    # nothing to do when src and dst already share a descriptor
    if src.fileno() == dst.fileno():
        yield
        return

    with _dup( src ) as fd_dup_src:

        dst.flush()
        src.flush()

        # point src's descriptor at dst
        os.close( src.fileno() )
        os.dup2( dst.fileno(), src.fileno() )

        try:
            yield
        finally:
            # BUG FIX: restoration now runs even when the body raises;
            # previously an exception left src permanently redirected
            # (and _dup leaked its descriptor).
            src.flush()
            os.close( src.fileno() )
            os.dup2( fd_dup_src, src.fileno() )
+
def start_redirect(dst = null_file, src = sys.stdout):

    """
    Begin redirecting src into dst at the file-descriptor level.

    Returns a duplicate of src's original descriptor, to be handed to
    end_redirect() later, or None when src and dst already share a
    descriptor (in which case nothing is changed).

    Example:
        fd = start_redirect( open("somefile.txt"), sys.stdout )
        ... do some stuff ...
        end_redirect(sys.stdout, fd)
    """

    src_fileno = src.fileno()
    if src_fileno == dst.fileno():
        return None

    # keep a copy of src's descriptor so it can be restored later
    saved_fd = os.dup( src_fileno )

    dst.flush()
    src.flush()

    # point src's descriptor at dst
    os.close( src_fileno )
    os.dup2( dst.fileno(), src_fileno )

    return saved_fd
+
def end_redirect(src, fd_dup_src):

    """
    Undo a start_redirect(): restore src's original file descriptor.

    src is the redirected stream; fd_dup_src is the value returned by
    start_redirect(). A value of None (no-op redirect) is ignored.
    """

    if fd_dup_src is None:
        return

    src.flush()

    src_fileno = src.fileno()
    os.close( src_fileno )
    # put the saved descriptor back where src expects it, then drop
    # the temporary duplicate
    os.dup2( fd_dup_src, src_fileno )
    os.close( fd_dup_src )
diff --git a/src/python/setup.py b/src/python/setup.py
index af2f5547..a832f511 100644
--- a/src/python/setup.py
+++ b/src/python/setup.py
@@ -62,5 +62,5 @@ setup(
name='pyabc',
version='1.0',
ext_modules=[ext],
- py_modules=['pyabc']
+ py_modules=['pyabc','getch','pyabc_split','redirect']
)