diff --git a/admin/python-devel.m4 b/admin/python-devel.m4 index 50d48ff..3114db0 100644 --- a/admin/python-devel.m4 +++ b/admin/python-devel.m4 @@ -7,7 +7,7 @@ AC_DEFUN([BKL_PYTHON_DEVEL],[ # Check for Python include path AC_MSG_CHECKING([for Python include path]) - python_path=`${PYTHON} -c "from distutils import sysconfig; print sysconfig.get_python_inc()"` + python_path=`${PYTHON} -c "from distutils import sysconfig; print(sysconfig.get_python_inc())"` AC_MSG_RESULT([$python_path]) if test -z "$python_path" ; then AC_MSG_ERROR([cannot find Python include path]) diff --git a/src/bakefile.py b/src/bakefile.py index 806c1b8..37fadcb 100755 --- a/src/bakefile.py +++ b/src/bakefile.py @@ -149,7 +149,7 @@ def run(args): sys.stderr.write("Warning: disabling XML cache because it's not supported by Python 2.2\n") except KeyError: # python < 2.3 didn't have protocol argument sys.stderr.write("Warning: disabling XML cache because it's not supported by Python 2.2\n") - except Exception, e: + except Exception as e: sys.stderr.write("Warning: disabling XML cache because an error occured while loading %s:\n %s\n" % (options.xml_cache, e)) formats.loadFormats() @@ -235,7 +235,7 @@ if __name__ == '__main__': prof.close() else: run(sys.argv[1:]) - except errors.ErrorBase, e: + except errors.ErrorBase as e: sys.stderr.write('%s\n' % e) sys.exit(1) except KeyboardInterrupt: diff --git a/src/bakefile_gen.py b/src/bakefile_gen.py index 19d4b75..d1b1046 100755 --- a/src/bakefile_gen.py +++ b/src/bakefile_gen.py @@ -26,6 +26,7 @@ # $Id$ # +from __future__ import print_function import sys, os, os.path, glob, fnmatch, shutil, re, time from optparse import OptionParser @@ -136,7 +137,7 @@ def loadTargets(filename, defaultFlags=[]): def _loadFile(filename): if verbose: - print 'loading task description from %s...' % filename + print('loading task description from %s...' % filename) try: root = xmlparser.parseFile(filename, xmlparser.NS_BAKEFILE_GEN) except xmlparser.ParsingError: @@ -197,7 +198,7 @@ def loadTargets(filename, defaultFlags=[]): raise ReaderError(root, 'incorrect root node (not a bakefile_gen file?)') if verbose: - print 'scanning directories for bakefiles...' + print('scanning directories for bakefiles...') for cmd in [x for x in root.children if x.name == 'input']: globs = cmd.value.replace('/', os.sep).split() @@ -206,7 +207,7 @@ def loadTargets(filename, defaultFlags=[]): files[f] = FileInfo(f) if verbose: - print 'building rules...' + print('building rules...') for cmd in root.children: if cmd.name == 'disable-formats': @@ -304,9 +305,9 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, """Updates all targets. Run jobs instances of bakefile simultaneously""" if verbose: if alwaysMakeAll: - print 'pretending all makefiles are out of date...' + print('pretending all makefiles are out of date...') else: - print 'determining which makefiles are out of date...' 
+ print('determining which makefiles are out of date...') needUpdate = [] total = 0 @@ -338,7 +339,7 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, totalNeedUpdate = len(needUpdate) if verbose: - print ' ...%i out of %i will be updated' % (totalNeedUpdate, total) + print(' ...%i out of %i will be updated' % (totalNeedUpdate, total)) class JobDesc: def __init__(self, data, jobNum, xmlcache, pretend=False): @@ -353,8 +354,8 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, def run(self): """Starts the subprocess.""" if not quiet: - print '[%i/%i] generating %s from %s' % ( - self.jobNum, totalNeedUpdate, self.format, self.filename) + print('[%i/%i] generating %s from %s' % ( + self.jobNum, totalNeedUpdate, self.format, self.filename)) sys.stdout.flush() cmd = _getBakefileExecutable() cmd.append('-f%s' % _get_base_format(self.format)) @@ -370,7 +371,7 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, cmd.append('--dry-run') cmd.append(self.filename) if verbose: - print ' '.join(cmd) + print(' '.join(cmd)) if not pretend: self.process = subprocess.Popen(cmd) @@ -406,7 +407,7 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, return 0 # no modified files else: raise errors.Error('bakefile exited with error (%i)' % self.process.returncode) - except IOError, e: + except IOError as e: raise errors.Error('failed to run bakefile: %s' % e) finally: os.remove(self.tempDeps) @@ -460,12 +461,12 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, finally: left = [p for p in childProcesses if p != None] if len(left) > 0: - print '[bakefile_gen] waiting for remaining jobs to finish after error...' + print('[bakefile_gen] waiting for remaining jobs to finish after error...') for p in left: try: p.wait() modifiedFiles += p.finish() - except Exception, e: + except Exception as e: pass # ignore further errors @@ -475,9 +476,9 @@ def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, if not quiet: if dryRun: - print '%i files would be modified' % modifiedFiles + print('%i files would be modified' % modifiedFiles) else: - print '%i files modified' % modifiedFiles + print('%i files modified' % modifiedFiles) def cleanTargets(pretend=False, dryRun=False): @@ -511,22 +512,22 @@ def cleanTargets(pretend=False, dryRun=False): continue if method not in ['replace','mergeBlocks']: continue if _isGeneratedBySomethingElse(o): continue - if verbose: print 'deleting %s' % o + if verbose: print('deleting %s' % o) if pretend: - print 'rm %s' % o + print('rm %s' % o) elif not dryRun: os.remove(o) if key in dependencies.dirs_db: for d in dependencies.dirs_db[key]: if not os.path.isdir(d): continue - if verbose: print 'deleting %s' % d + if verbose: print('deleting %s' % d) if pretend: - print 'rmdir %s' % d + print('rmdir %s' % d) elif not dryRun: try: os.rmdir(d) - except OSError, e: + except OSError as e: sys.stderr.write("Warning: cannot delete %s: %s\n" % (d, e)) def listOutputFiles(jobs, alwaysMakeAll=0): @@ -539,7 +540,7 @@ def listOutputFiles(jobs, alwaysMakeAll=0): absf = os.path.abspath(f) for fmt in files[f].formats: for outf, m in dependencies.deps_db[(absf,fmt)].outputs: - print outf + print(outf) def run(args): @@ -635,7 +636,7 @@ def run(args): keepGoing=options.keepGoing, alwaysMakeAll=options.alwaysMakeAll, dryRun=options.dryRun) - except errors.ErrorBase, e: + except errors.ErrorBase as e: sys.stderr.write('[bakefile_gen] %s' % str(e)) 
sys.exit(1) diff --git a/src/bkl_c.i b/src/bkl_c.i index f23950a..ed49400 100644 --- a/src/bkl_c.i +++ b/src/bkl_c.i @@ -31,6 +31,9 @@ %module bkl_c +%begin %{ +#define SWIG_PYTHON_STRICT_BYTE_CHAR +%} /* ------------------------------------------------------------------------ */ /* Expressions evaluation */ diff --git a/src/bottlenecks.c b/src/bottlenecks.c index 26cf73e..53b3a1a 100644 --- a/src/bottlenecks.c +++ b/src/bottlenecks.c @@ -94,6 +94,7 @@ const char *doEvalExpr(const char *expr, const char *text_begin, *code_begin; unsigned brackets = 0; const char *origexpr = expr; + fprintf(stderr, "BLAH0\n"); assert(expr != NULL); len = strlen(expr); @@ -126,12 +127,13 @@ const char *doEvalExpr(const char *expr, moreArgs, text_begin, textlen); if (PyErr_Occurred()) { + printf("BLAH1\n"); RELEASE_BUFFER(); return NULL; } - size = PyString_Size(r); + size = PyBytes_Size(r); ENSURE_BUFFER(output - txtbuf + size); - memcpy(output, PyString_AsString(r), size); + memcpy(output, PyBytes_AsString(r), size); output += size; Py_DECREF(r); } @@ -160,12 +162,14 @@ const char *doEvalExpr(const char *expr, add_dict); if (PyErr_Occurred()) { + printf("BLAH2\n"); RELEASE_BUFFER(); return NULL; } - size = PyString_Size(r); + printf("BLAH2 type=%s\n", r->ob_type->tp_name); + size = PyBytes_Size(r); ENSURE_BUFFER(output - txtbuf + size); - memcpy(output, PyString_AsString(r), size); + memcpy(output, PyBytes_AsString(r), size); output += size; Py_DECREF(r); break; @@ -198,6 +202,7 @@ const char *doEvalExpr(const char *expr, if (brackets > 0) { + printf("BLAH3\n"); PyErr_Format(PyExc_RuntimeError, "unmatched brackets in '%s'", origexpr); return NULL; @@ -222,12 +227,13 @@ const char *doEvalExpr(const char *expr, moreArgs, text_begin, textlen); if (PyErr_Occurred()) { + printf("BLAH4\n"); RELEASE_BUFFER(); return NULL; } - size = PyString_Size(r); + size = PyBytes_Size(r); ENSURE_BUFFER(output - txtbuf + size); - memcpy(output, PyString_AsString(r), size); + memcpy(output, PyBytes_AsString(r), size); output += size; Py_DECREF(r); } diff --git a/src/containers.py b/src/containers.py index 41edc8e..2d71596 100644 --- a/src/containers.py +++ b/src/containers.py @@ -41,7 +41,7 @@ class OrderedDict(dict): self.order = [] def __setitem__(self, key, value): - if not self.has_key(key): + if key not in self: self.order.append(key) dict.__setitem__(self, key, value) def __delitem__(self, key): @@ -103,7 +103,7 @@ class OrderedDictWithClasification(OrderedDict): self.getGroup = getGroupPredicate def __setitem__(self, key, value): - if not self.has_key(key): + if key not in self: self.order[self.getGroup(value)].append(key) dict.__setitem__(self, key, value) def __delitem__(self, key): diff --git a/src/dependencies.py b/src/dependencies.py index e6036ee..3af824e 100644 --- a/src/dependencies.py +++ b/src/dependencies.py @@ -26,7 +26,11 @@ # Keeping track of dependencies # -import cPickle, os.path, time, glob +try: + import cPickle +except ImportError: + import pickle as cPickle +import os.path, time, glob DEPS_FORMAT_VERSION = 6 @@ -133,7 +137,7 @@ def load(filename): def __loadDb(f, orig_db): try: db = cPickle.load(f) - except EOFError, e: + except EOFError as e: # so that the callers can only catch IOError raise IOError(e) diff --git a/src/empy/README b/src/empy/README index a7da0d7..a1b7c89 100644 --- a/src/empy/README +++ b/src/empy/README @@ -42,7 +42,7 @@ Overview Getting the software - The current version of empy is 3.3. + The current version of empy is 3.3.4. 
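The hunks above (bakefile.py, bakefile_gen.py, containers.py, dependencies.py) apply the same handful of 2.x/3.x-compatible idioms over and over: print as a function via `from __future__ import print_function`, `except ... as e` instead of `except ..., e`, membership tests instead of `dict.has_key()`, and a try/except import fallback for renamed modules such as `cPickle`. A minimal standalone sketch of those idioms; the cache file name and the `load_cache` helper are made up for illustration and are not part of the patch:

from __future__ import print_function   # print() works on Python 2.6+ and 3.x

try:
    import cPickle as pickle             # Python 2: C implementation
except ImportError:
    import pickle                        # Python 3: cPickle was folded into pickle

def load_cache(path):
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except (IOError, EOFError) as e:     # 'as e' is accepted by both 2.6+ and 3.x
        print('Warning: cache %s unusable: %s' % (path, e))
        return {}

cache = load_cache('.xmlcache')
if 'key' not in cache:                   # membership test replaces cache.has_key('key')
    cache['key'] = 42
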
The latest version of the software is available in a tarball here: "http://www.alcyone.com/software/empy/empy-latest.tar.gz", @@ -55,11 +55,8 @@ Getting the software Requirements - EmPy should work with any version of Python from 1.5.2 onward. It - has been tested with all major versions of CPython from 1.5 up, - and Jython from 2.0 up (using Java runtimes 1.3 and 1.4). The - included test script is intended to run on Unix-like systems with - a Bourne shell. + EmPy should work with any version of Python from 2.4 onward, + including 3.x. License @@ -2106,6 +2103,17 @@ Wish list Release history + - 3.3.4; 2019 Feb 26. Minor fix for a Python 3 compatibilty + issue. + + - 3.3.3; 2017 Feb 12. Fix for 'defined' call. + + - 3.3.2; 2014 Jan 24. Additional fix for source compatibility + between 2.x and 3.0. + + - 3.3.1; 2014 Jan 22. Source compatibility for 2.x and 3.0; + 1.x and Jython compatibility dropped. + - 3.3; 2003 Oct 27. Custom markup '@<...>'; remove separate pseudomodule instance for greater transparency; deprecate 'interpreter' attribute of pseudomodule; deprecate auxiliary @@ -2307,4 +2315,4 @@ Author Version - Version 3.3 $Date: 2003/10/27 $ $Author: max $ + Version 3.3.3 $Date: 2014-01-24 13:39:38 -0800 (Fri, 24 Jan 2014) $ $Author: max $ diff --git a/src/empy/em.py b/src/empy/em.py index 21eb831..f73a91c 100644 --- a/src/empy/em.py +++ b/src/empy/em.py @@ -1,5 +1,5 @@ # -# $Id$ $Date: 2003/10/27 $ +# $Id: em.py 5709 2019-02-26 21:40:43Z max $ $Date: 2019-02-26 13:40:43 -0800 (Tue, 26 Feb 2019) $ """ A system for processing Python as markup embedded in text. @@ -7,31 +7,49 @@ A system for processing Python as markup embedded in text. __program__ = 'empy' -__version__ = '3.3' +__version__ = '3.3.4' __url__ = 'http://www.alcyone.com/software/empy/' __author__ = 'Erik Max Francis ' -__copyright__ = 'Copyright (C) 2002-2003 Erik Max Francis' +__copyright__ = 'Copyright (C) 2002-2019 Erik Max Francis' __license__ = 'LGPL' import copy import getopt +import inspect import os import re -import string import sys import types +# 2.x/3.0 compatbility try: - # The equivalent of import cStringIO as StringIO. - import cStringIO - StringIO = cStringIO - del cStringIO + from StringIO import StringIO except ImportError: - import StringIO + from io import StringIO -# For backward compatibility, we can't assume these are defined. -False, True = 0, 1 +try: + _unicode = unicode # bytes will be undefined in 3.x releases + _str = str + _unichr = unichr + _input = raw_input + def _exec(code, globals, locals=None): + if globals is None: + exec("""exec code""") + else: + if locals is None: + exec("""exec code in globals""") + else: + exec("""exec code in globals, locals""") +except NameError: + _unicode = str + _str = bytes + _unichr = chr + _input = input + try: + _exec = __builtins__.__dict__['exec'] + except AttributeError: + _exec = __builtins__['exec'] # Some basic defaults. 
FAILURE_CODE = 1 @@ -297,9 +315,9 @@ class MetaError(Exception): self.exc = exc def __str__(self): - backtrace = map(lambda x: str(x), self.contexts) - return "%s: %s (%s)" % (self.exc.__class__, self.exc, \ - (string.join(backtrace, ', '))) + backtrace = [str(x) for x in self.contexts] + return "%s: %s (%s)" % (self.exc.__class__, self.exc, + (', '.join(backtrace))) class Subsystem: @@ -313,14 +331,9 @@ class Subsystem: self.outputEncoding = None self.errors = None - def initialize(self, inputEncoding=None, outputEncoding=None, \ + def initialize(self, inputEncoding=None, outputEncoding=None, inputErrors=None, outputErrors=None): self.useUnicode = True - try: - unicode - import codecs - except (NameError, ImportError): - raise SubsystemError, "Unicode subsystem unavailable" defaultEncoding = sys.getdefaultencoding() if inputEncoding is None: inputEncoding = defaultEncoding @@ -337,7 +350,7 @@ class Subsystem: def assertUnicode(self): if not self.useUnicode: - raise SubsystemError, "Unicode subsystem unavailable" + raise SubsystemError("Unicode subsystem unavailable") def open(self, name, mode=None): if self.useUnicode: @@ -380,14 +393,14 @@ class Stack: try: return self.data[-1] except IndexError: - raise StackUnderflowError, "stack is empty for top" + raise StackUnderflowError("stack is empty for top") def pop(self): """Pop the top element off the stack and return it.""" try: return self.data.pop() except IndexError: - raise StackUnderflowError, "stack is empty for pop" + raise StackUnderflowError("stack is empty for pop") def push(self, object): """Push an element onto the top of the stack.""" @@ -395,7 +408,7 @@ class Stack: def filter(self, function): """Filter the elements of the stack through the function.""" - self.data = filter(function, self.data) + self.data = list(filter(function, self.data)) def purge(self): """Purge the stack.""" @@ -405,14 +418,15 @@ class Stack: """Create a duplicate of this stack.""" return self.__class__(self.data[:]) - def __nonzero__(self): return len(self.data) != 0 + def __nonzero__(self): return len(self.data) != 0 # 2.x + def __bool__(self): return len(self.data) != 0 # 3.x def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[-(index + 1)] def __repr__(self): - return '<%s instance at 0x%x [%s]>' % \ - (self.__class__, id(self), \ - string.join(map(repr, self.data), ', ')) + return ('<%s instance at 0x%x [%s]>' % + (self.__class__, id(self), + ', '.join(repr(x) for x in self.data))) class AbstractFile: @@ -429,7 +443,7 @@ class AbstractFile: self.mode = mode self.buffered = buffered if buffered: - self.bufferFile = StringIO.StringIO() + self.bufferFile = StringIO() else: self.bufferFile = theSubsystem.open(filename, mode) # Okay, we got this far, so the AbstractFile is initialized. @@ -477,7 +491,7 @@ class Diversion: strings or (readable) file objects.""" def __init__(self): - self.file = StringIO.StringIO() + self.file = StringIO() # These methods define the writable file-like interface for the # diversion. 
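The em.py changes in the chunks above follow a fixed recipe for classes like Stack and Diversion: `raise SomeError("msg")` instead of the removed `raise SomeError, "msg"` form, a `__bool__` companion for `__nonzero__`, string methods in place of the deleted `string` module, and `list(filter(...))` where a real list is needed. A simplified toy class showing the same recipe (illustrative only; this is not the patch's Stack):

class ToyStack(object):
    def __init__(self, seq=None):
        self.data = list(seq) if seq is not None else []

    def push(self, item):
        self.data.append(item)

    def pop(self):
        if not self.data:
            raise IndexError("stack is empty for pop")   # exception instance, not 'raise Cls, msg'
        return self.data.pop()

    def keep(self, predicate):
        # filter() returns an iterator on Python 3, so materialize it
        self.data = list(filter(predicate, self.data))

    def __bool__(self):                  # Python 3 consults __bool__ ...
        return len(self.data) != 0

    __nonzero__ = __bool__               # ... Python 2 consults __nonzero__

    def __repr__(self):
        # str.join replaces string.join(map(repr, ...), ', ')
        return '<ToyStack [%s]>' % ', '.join(repr(x) for x in self.data)

s = ToyStack()
assert not s
s.push(1)
assert s and repr(s) == '<ToyStack [1]>'
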
@@ -503,7 +517,7 @@ class Diversion: def asFile(self): """Return the diversion as a file.""" - return StringIO.StringIO(self.file.getvalue()) + return StringIO(self.file.getvalue()) class Stream: @@ -543,15 +557,16 @@ class Stream: independently.""" if shortcut == 0: return NullFilter() - elif type(shortcut) is types.FunctionType or \ - type(shortcut) is types.BuiltinFunctionType or \ - type(shortcut) is types.BuiltinMethodType or \ - type(shortcut) is types.LambdaType: + elif (isinstance(shortcut, types.FunctionType) or + inspect.ismethoddescriptor(shortcut) or + isinstance(shortcut, types.BuiltinFunctionType) or + isinstance(shortcut, types.BuiltinMethodType) or + isinstance(shortcut, types.LambdaType)): return FunctionFilter(shortcut) - elif type(shortcut) is types.StringType: + elif isinstance(shortcut, _str) or isinstance(shortcut, _unicode): return StringFilter(filter) - elif type(shortcut) is types.DictType: - raise NotImplementedError, "mapping filters not yet supported" + elif isinstance(shortcut, dict): + raise NotImplementedError("mapping filters not yet supported") else: # Presume it's a plain old filter. return shortcut @@ -576,7 +591,7 @@ class Stream: # Shortcuts for "no filter." self.filter = self.file else: - if type(shortcut) in (types.ListType, types.TupleType): + if isinstance(shortcut, list) or isinstance(shortcut, tuple): shortcuts = list(shortcut) else: shortcuts = [shortcut] @@ -625,43 +640,43 @@ class Stream: """Create a diversion if one does not already exist, but do not divert to it yet.""" if name is None: - raise DiversionError, "diversion name must be non-None" - if not self.diversions.has_key(name): + raise DiversionError("diversion name must be non-None") + if name not in self.diversions: self.diversions[name] = Diversion() def retrieve(self, name): """Retrieve the given diversion.""" if name is None: - raise DiversionError, "diversion name must be non-None" - if self.diversions.has_key(name): + raise DiversionError("diversion name must be non-None") + if name in self.diversions: return self.diversions[name] else: - raise DiversionError, "nonexistent diversion: %s" % name + raise DiversionError("nonexistent diversion: %s" % name) def divert(self, name): """Start diverting.""" if name is None: - raise DiversionError, "diversion name must be non-None" + raise DiversionError("diversion name must be non-None") self.create(name) self.currentDiversion = name def undivert(self, name, purgeAfterwards=False): """Undivert a particular diversion.""" if name is None: - raise DiversionError, "diversion name must be non-None" - if self.diversions.has_key(name): + raise DiversionError("diversion name must be non-None") + if name in self.diversions: diversion = self.diversions[name] self.filter.write(diversion.asString()) if purgeAfterwards: self.purge(name) else: - raise DiversionError, "nonexistent diversion: %s" % name + raise DiversionError("nonexistent diversion: %s" % name) def purge(self, name): """Purge the specified diversion.""" if name is None: - raise DiversionError, "diversion name must be non-None" - if self.diversions.has_key(name): + raise DiversionError("diversion name must be non-None") + if name in self.diversions: del self.diversions[name] if self.currentDiversion == name: self.currentDiversion = None @@ -670,8 +685,7 @@ class Stream: """Undivert all pending diversions.""" if self.diversions: self.revert() # revert before undiverting! 
- names = self.diversions.keys() - names.sort() + names = sorted(self.diversions.keys()) for name in names: self.undivert(name) if purgeAfterwards: @@ -778,6 +792,8 @@ class Filter: """Return the next filter/file-like object in the sequence, or None.""" return self.sink + def __next__(self): return self.next() + def write(self, data): """The standard write method; this must be overridden in subclasses.""" raise NotImplementedError @@ -847,13 +863,14 @@ class StringFilter(Filter): filters any incoming data through it.""" def __init__(self, table): - if not (type(table) == types.StringType and len(table) == 256): - raise FilterError, "table must be 256-character string" + if not ((isinstance(table, _str) or isinstance(table, _unicode)) + and len(table) == 256): + raise FilterError("table must be 256-character string") Filter.__init__(self) self.table = table def write(self, data): - self.sink.write(string.translate(data, self.table)) + self.sink.write(data.translate(self.table)) class BufferedFilter(Filter): @@ -867,7 +884,7 @@ class BufferedFilter(Filter): self.buffer = '' def write(self, data): - self.buffer = self.buffer + data + self.buffer += data def flush(self): if self.buffer: @@ -886,8 +903,7 @@ class SizeBufferedFilter(BufferedFilter): def write(self, data): BufferedFilter.write(self, data) while len(self.buffer) > self.bufferSize: - chunk, self.buffer = \ - self.buffer[:self.bufferSize], self.buffer[self.bufferSize:] + chunk, self.buffer = self.buffer[:self.bufferSize], self.buffer[self.bufferSize:] self.sink.write(chunk) class LineBufferedFilter(BufferedFilter): @@ -900,7 +916,7 @@ class LineBufferedFilter(BufferedFilter): def write(self, data): BufferedFilter.write(self, data) - chunks = string.split(self.buffer, '\n') + chunks = self.buffer.split('\n') for chunk in chunks[:-1]: self.sink.write(chunk + '\n') self.buffer = chunks[-1] @@ -938,7 +954,7 @@ class Context: if self.pause: self.pause = False else: - self.line = self.line + quantity + self.line += quantity def identify(self): return self.name, self.line @@ -962,7 +978,7 @@ class Hook: def deregister(self, interpreter): if interpreter is not self.interpreter: - raise Error, "hook not associated with this interpreter" + raise Error("hook not associated with this interpreter") self.interpreter = None def push(self): @@ -1059,13 +1075,13 @@ class VerboseHook(Hook): self.name = name def __call__(self, **keywords): - self.hook.output.write("%s%s: %s\n" % \ - (' ' * self.hook.indent, \ + self.hook.output.write("%s%s: %s\n" % + (' ' * self.hook.indent, self.name, repr(keywords))) for attribute in dir(Hook): - if attribute[:1] != '_' and \ - attribute not in self.EXEMPT_ATTRIBUTES: + if (attribute[:1] != '_' and + attribute not in self.EXEMPT_ATTRIBUTES): self.__dict__[attribute] = FakeMethod(self, attribute) @@ -1132,7 +1148,7 @@ class CommentToken(ExpansionToken): if loc >= 0: self.comment = scanner.chop(loc, 1) else: - raise TransientParseError, "comment expects newline" + raise TransientParseError("comment expects newline") def string(self): return '%s#%s\n' % (self.prefix, self.comment) @@ -1142,9 +1158,9 @@ class ContextNameToken(ExpansionToken): def scan(self, scanner): loc = scanner.find('\n') if loc >= 0: - self.name = string.strip(scanner.chop(loc, 1)) + self.name = scanner.chop(loc, 1).strip() else: - raise TransientParseError, "context name expects newline" + raise TransientParseError("context name expects newline") def run(self, interpreter, locals): context = interpreter.context() @@ -1158,9 +1174,9 @@ class 
ContextLineToken(ExpansionToken): try: self.line = int(scanner.chop(loc, 1)) except ValueError: - raise ParseError, "context line requires integer" + raise ParseError("context line requires integer") else: - raise TransientParseError, "context line expects newline" + raise TransientParseError("context line expects newline") def run(self, interpreter, locals): context = interpreter.context() @@ -1183,7 +1199,7 @@ class EscapeToken(ExpansionToken): result = '\x08' elif code == 'd': # decimal code decimalCode = scanner.chop(3) - result = chr(string.atoi(decimalCode, 10)) + result = chr(int(decimalCode, 10)) elif code == 'e': # ESC result = '\x1b' elif code == 'f': # FF @@ -1196,20 +1212,19 @@ class EscapeToken(ExpansionToken): theSubsystem.assertUnicode() import unicodedata if scanner.chop(1) != '{': - raise ParseError, r"Unicode name escape should be \N{...}" + raise ParseError("Unicode name escape should be \\N{...}") i = scanner.find('}') name = scanner.chop(i, 1) try: result = unicodedata.lookup(name) except KeyError: - raise SubsystemError, \ - "unknown Unicode character name: %s" % name + raise SubsystemError("unknown Unicode character name: %s" % name) elif code == 'o': # octal code octalCode = scanner.chop(3) - result = chr(string.atoi(octalCode, 8)) + result = chr(int(octalCode, 8)) elif code == 'q': # quaternary code quaternaryCode = scanner.chop(4) - result = chr(string.atoi(quaternaryCode, 4)) + result = chr(int(quaternaryCode, 4)) elif code == 'r': # CR result = '\x0d' elif code in 's ': # SP @@ -1219,32 +1234,32 @@ class EscapeToken(ExpansionToken): elif code in 'u': # Unicode 16-bit hex literal theSubsystem.assertUnicode() hexCode = scanner.chop(4) - result = unichr(string.atoi(hexCode, 16)) + result = _unichr(int(hexCode, 16)) elif code in 'U': # Unicode 32-bit hex literal theSubsystem.assertUnicode() hexCode = scanner.chop(8) - result = unichr(string.atoi(hexCode, 16)) + result = _unichr(int(hexCode, 16)) elif code == 'v': # VT result = '\x0b' elif code == 'x': # hexadecimal code hexCode = scanner.chop(2) - result = chr(string.atoi(hexCode, 16)) + result = chr(int(hexCode, 16)) elif code == 'z': # EOT result = '\x04' elif code == '^': # control character - controlCode = string.upper(scanner.chop(1)) + controlCode = scanner.chop(1).upper() if controlCode >= '@' and controlCode <= '`': result = chr(ord(controlCode) - ord('@')) elif controlCode == '?': result = '\x7f' else: - raise ParseError, "invalid escape control code" + raise ParseError("invalid escape control code") else: - raise ParseError, "unrecognized escape code" + raise ParseError("unrecognized escape code") assert result is not None self.code = result except ValueError: - raise ParseError, "invalid numeric escape code" + raise ParseError("invalid numeric escape code") def run(self, interpreter, locals): interpreter.write(self.code) @@ -1259,13 +1274,13 @@ class SignificatorToken(ExpansionToken): if loc >= 0: line = scanner.chop(loc, 1) if not line: - raise ParseError, "significator must have nonblank key" + raise ParseError("significator must have nonblank key") if line[0] in ' \t\v\n': - raise ParseError, "no whitespace between % and key" + raise ParseError("no whitespace between % and key") # Work around a subtle CPython-Jython difference by stripping # the string before splitting it: 'a '.split(None, 1) has two # elements in Jython 2.1). 
- fields = string.split(string.strip(line), None, 1) + fields = line.strip().split(None, 1) if len(fields) == 2 and fields[1] == '': fields.pop() self.key = fields[0] @@ -1273,12 +1288,12 @@ class SignificatorToken(ExpansionToken): fields.append(None) self.key, self.valueCode = fields else: - raise TransientParseError, "significator expects newline" + raise TransientParseError("significator expects newline") def run(self, interpreter, locals): value = self.valueCode if value is not None: - value = interpreter.evaluate(string.strip(value), locals) + value = interpreter.evaluate(value.strip(), locals) interpreter.significate(self.key, value) def string(self): @@ -1337,11 +1352,11 @@ class ExpressionToken(ExpansionToken): def string(self): result = self.testCode if self.thenCode: - result = result + '?' + self.thenCode + result += '?' + self.thenCode if self.elseCode: - result = result + '!' + self.elseCode + result += '!' + self.elseCode if self.exceptCode: - result = result + '$' + self.exceptCode + result += '$' + self.exceptCode return '%s(%s)' % (self.prefix, result) class StringLiteralToken(ExpansionToken): @@ -1439,7 +1454,7 @@ class ControlToken(ExpansionToken): scanner.acquire() i = scanner.complex('[', ']', 0) self.contents = scanner.chop(i, 1) - fields = string.split(string.strip(self.contents), ' ', 1) + fields = self.contents.strip().split(' ', 1) if len(fields) > 1: self.type, self.rest = fields else: @@ -1447,7 +1462,7 @@ class ControlToken(ExpansionToken): self.rest = None self.subtokens = [] if self.type in self.GREEDY_TYPES and self.rest is None: - raise ParseError, "control '%s' needs arguments" % self.type + raise ParseError("control '%s' needs arguments" % self.type) if self.type in self.PRIMARY_TYPES: self.subscan(scanner, self.type) self.kind = 'primary' @@ -1458,7 +1473,7 @@ class ControlToken(ExpansionToken): elif self.type in self.END_TYPES: self.kind = 'end' else: - raise ParseError, "unknown control markup: '%s'" % self.type + raise ParseError("unknown control markup: '%s'" % self.type) scanner.release() def subscan(self, scanner, primary): @@ -1466,13 +1481,11 @@ class ControlToken(ExpansionToken): while True: token = scanner.one() if token is None: - raise TransientParseError, \ - "control '%s' needs more tokens" % primary - if isinstance(token, ControlToken) and \ - token.type in self.END_TYPES: + raise TransientParseError("control '%s' needs more tokens" % primary) + if (isinstance(token, ControlToken) and + token.type in self.END_TYPES): if token.rest != primary: - raise ParseError, \ - "control must end with 'end %s'" % primary + raise ParseError("control must end with 'end %s'" % primary) break self.subtokens.append(token) @@ -1488,11 +1501,10 @@ class ControlToken(ExpansionToken): latest = [] result.append((self, latest)) for subtoken in self.subtokens: - if isinstance(subtoken, ControlToken) and \ - subtoken.kind == 'secondary': + if (isinstance(subtoken, ControlToken) and + subtoken.kind == 'secondary'): if subtoken.type not in allowed: - raise ParseError, \ - "control unexpected secondary: '%s'" % subtoken.type + raise ParseError("control unexpected secondary: '%s'" % subtoken.type) latest = [] result.append((subtoken, latest)) else: @@ -1500,7 +1512,7 @@ class ControlToken(ExpansionToken): return result def run(self, interpreter, locals): - interpreter.invoke('beforeControl', type=self.type, rest=self.rest, \ + interpreter.invoke('beforeControl', type=self.type, rest=self.rest, locals=locals) if self.type == 'if': info = self.build(['elif', 'else']) @@ 
-1509,8 +1521,7 @@ class ControlToken(ExpansionToken): elseTokens = info.pop()[1] for secondary, subtokens in info: if secondary.type not in ('if', 'elif'): - raise ParseError, \ - "control 'if' unexpected secondary: '%s'" % secondary.type + raise ParseError("control 'if' unexpected secondary: '%s'" % secondary.type) if interpreter.evaluate(secondary.rest, locals): self.subrun(subtokens, interpreter, locals) break @@ -1520,14 +1531,14 @@ class ControlToken(ExpansionToken): elif self.type == 'for': sides = self.IN_RE.split(self.rest, 1) if len(sides) != 2: - raise ParseError, "control expected 'for x in seq'" + raise ParseError("control expected 'for x in seq'") iterator, sequenceCode = sides info = self.build(['else']) elseTokens = None if info[-1][0].type == 'else': elseTokens = info.pop()[1] if len(info) != 1: - raise ParseError, "control 'for' expects at most one 'else'" + raise ParseError("control 'for' expects at most one 'else'") sequence = interpreter.evaluate(sequenceCode, locals) for element in sequence: try: @@ -1547,7 +1558,7 @@ class ControlToken(ExpansionToken): if info[-1][0].type == 'else': elseTokens = info.pop()[1] if len(info) != 1: - raise ParseError, "control 'while' expects at most one 'else'" + raise ParseError("control 'while' expects at most one 'else'") atLeastOnce = False while True: try: @@ -1564,24 +1575,23 @@ class ControlToken(ExpansionToken): elif self.type == 'try': info = self.build(['except', 'finally']) if len(info) == 1: - raise ParseError, "control 'try' needs 'except' or 'finally'" + raise ParseError("control 'try' needs 'except' or 'finally'") type = info[-1][0].type if type == 'except': for secondary, _tokens in info[1:]: if secondary.type != 'except': - raise ParseError, \ - "control 'try' cannot have 'except' and 'finally'" + raise ParseError("control 'try' cannot have 'except' and 'finally'") else: assert type == 'finally' if len(info) != 2: - raise ParseError, \ - "control 'try' can only have one 'finally'" + raise ParseError("control 'try' can only have one 'finally'") if type == 'except': try: self.subrun(info[0][1], interpreter, locals) except FlowError: raise - except Exception, e: + except Exception: + e = sys.exc_info()[1] for secondary, tokens in info[1:]: exception, variable = interpreter.clause(secondary.rest) if variable is not None: @@ -1597,22 +1607,21 @@ class ControlToken(ExpansionToken): finally: self.subrun(info[1][1], interpreter, locals) elif self.type == 'continue': - raise ContinueFlow, "control 'continue' without 'for', 'while'" + raise ContinueFlow("control 'continue' without 'for', 'while'") elif self.type == 'break': - raise BreakFlow, "control 'break' without 'for', 'while'" + raise BreakFlow("control 'break' without 'for', 'while'") elif self.type == 'def': signature = self.rest definition = self.substring() - code = 'def %s:\n' \ - ' r"""%s"""\n' \ - ' return %s.expand(r"""%s""", locals())\n' % \ - (signature, definition, interpreter.pseudo, definition) + code = ('def %s:\n' + ' r"""%s"""\n' + ' return %s.expand(r"""%s""", locals())\n' % + (signature, definition, interpreter.pseudo, definition)) interpreter.execute(code, locals) elif self.type == 'end': - raise ParseError, "control 'end' requires primary markup" + raise ParseError("control 'end' requires primary markup") else: - raise ParseError, \ - "control '%s' cannot be at this level" % self.type + raise ParseError("control '%s' cannot be at this level" % self.type) interpreter.invoke('afterControl') def subrun(self, tokens, interpreter, locals): @@ -1621,13 
+1630,13 @@ class ControlToken(ExpansionToken): token.run(interpreter, locals) def substring(self): - return string.join(map(str, self.subtokens), '') + return ''.join(str(x) for x in self.subtokens) def string(self): if self.kind == 'primary': - return '%s[%s]%s%s[end %s]' % \ - (self.prefix, self.contents, self.substring(), \ - self.prefix, self.type) + return ('%s[%s]%s%s[end %s]' % + (self.prefix, self.contents, self.substring(), + self.prefix, self.type)) else: return '%s[%s]' % (self.prefix, self.contents) @@ -1665,23 +1674,34 @@ class Scanner: self.buffer = data self.lock = 0 - def __nonzero__(self): return self.pointer < len(self.buffer) + def __nonzero__(self): return self.pointer < len(self.buffer) # 2.x + def __bool__(self): return self.pointer < len(self.buffer) # 3.x def __len__(self): return len(self.buffer) - self.pointer - def __getitem__(self, index): return self.buffer[self.pointer + index] + + def __getitem__(self, index): + if isinstance(index, slice): + assert index.step is None or index.step == 1 + return self.__getslice__(index.start, index.stop) + else: + return self.buffer[self.pointer + index] def __getslice__(self, start, stop): + if start is None: + start = 0 + if stop is None: + stop = len(self) if stop > len(self): stop = len(self) return self.buffer[self.pointer + start:self.pointer + stop] def advance(self, count=1): """Advance the pointer count characters.""" - self.pointer = self.pointer + count + self.pointer += count def retreat(self, count=1): self.pointer = self.pointer - count if self.pointer < 0: - raise ParseError, "can't retreat back over synced out chars" + raise ParseError("can't retreat back over synced out chars") def set(self, data): """Start the scanner digesting a new batch of data; start the pointer @@ -1691,7 +1711,7 @@ class Scanner: def feed(self, data): """Feed some more data to the scanner.""" - self.buffer = self.buffer + data + self.buffer += data def chop(self, count=None, slop=0): """Chop the first count + slop characters off the front, and return @@ -1701,18 +1721,18 @@ class Scanner: assert slop == 0 count = len(self) if count > len(self): - raise TransientParseError, "not enough data to read" + raise TransientParseError("not enough data to read") result = self[:count] self.advance(count + slop) return result def acquire(self): """Lock the scanner so it doesn't destroy data on sync.""" - self.lock = self.lock + 1 + self.lock += 1 def release(self): """Unlock the scanner.""" - self.lock = self.lock - 1 + self.lock -= 1 def sync(self): """Sync up the buffer with the read head.""" @@ -1734,7 +1754,7 @@ class Scanner: """Read count chars starting from i; raise a transient error if there aren't enough characters remaining.""" if len(self) < i + count: - raise TransientParseError, "need more data to read" + raise TransientParseError("need more data to read") else: return self[i:i + count] @@ -1749,7 +1769,7 @@ class Scanner: if self[i] == quote: return quote else: - raise TransientParseError, "need to scan for rest of quote" + raise TransientParseError("need to scan for rest of quote") if self[i + 1] == self[i + 2] == quote: quote = quote * 3 if quote is not None: @@ -1768,9 +1788,9 @@ class Scanner: def find(self, sub, start=0, end=None): """Find the next occurrence of the character, or return -1.""" if end is not None: - return string.find(self.rest(), sub, start, end) + return self.rest().find(sub, start, end) else: - return string.find(self.rest(), sub, start) + return self.rest().find(sub, start) def last(self, char, start=0, 
end=None): """Find the first character that is _not_ the specified character.""" @@ -1780,9 +1800,9 @@ class Scanner: while i < end: if self[i] != char: return i - i = i + 1 + i += 1 else: - raise TransientParseError, "expecting other than %s" % char + raise TransientParseError("expecting other than %s" % char) def next(self, target, start=0, end=None, mandatory=False): """Scan for the next occurrence of one of the characters in @@ -1800,21 +1820,21 @@ class Scanner: quote = None else: quote = newQuote - i = i + len(newQuote) + i += len(newQuote) else: c = self[i] if quote: if c == '\\': - i = i + 1 + i += 1 else: if c in target: return i - i = i + 1 + i += 1 else: if mandatory: - raise ParseError, "expecting %s, not found" % target + raise ParseError("expecting %s, not found" % target) else: - raise TransientParseError, "expecting ending character" + raise TransientParseError("expecting ending character") def quote(self, start=0, end=None, mandatory=False): """Scan for the end of the next quote.""" @@ -1826,19 +1846,19 @@ class Scanner: while i < end: newQuote = self.check(i, quote) if newQuote: - i = i + len(newQuote) + i += len(newQuote) if newQuote == quote: return i else: c = self[i] if c == '\\': - i = i + 1 - i = i + 1 + i += 1 + i += 1 else: if mandatory: - raise ParseError, "expecting end of string literal" + raise ParseError("expecting end of string literal") else: - raise TransientParseError, "expecting end of string literal" + raise TransientParseError("expecting end of string literal") def nested(self, enter, exit, start=0, end=None): """Scan from i for an ending sequence, respecting entries and exits @@ -1850,14 +1870,14 @@ class Scanner: while i < end: c = self[i] if c == enter: - depth = depth + 1 + depth += 1 elif c == exit: - depth = depth - 1 + depth -= 1 if depth < 0: return i - i = i + 1 + i += 1 else: - raise TransientParseError, "expecting end of complex expression" + raise TransientParseError("expecting end of complex expression") def complex(self, enter, exit, start=0, end=None, skip=None): """Scan from i for an ending sequence, respecting quotes, @@ -1875,24 +1895,24 @@ class Scanner: quote = None else: quote = newQuote - i = i + len(newQuote) + i += len(newQuote) else: c = self[i] if quote: if c == '\\': - i = i + 1 + i += 1 else: if skip is None or last != skip: if c == enter: - depth = depth + 1 + depth += 1 elif c == exit: - depth = depth - 1 + depth -= 1 if depth < 0: return i last = c - i = i + 1 + i += 1 else: - raise TransientParseError, "expecting end of complex expression" + raise TransientParseError("expecting end of complex expression") def word(self, start=0): """Scan from i for a simple word.""" @@ -1901,9 +1921,9 @@ class Scanner: while i < length: if not self[i] in IDENTIFIER_CHARS: return i - i = i + 1 + i += 1 else: - raise TransientParseError, "expecting end of word" + raise TransientParseError("expecting end of word") def phrase(self, start=0): """Scan from i for a phrase (e.g., 'word', 'f(a, b, c)', 'a[i]', or @@ -1913,7 +1933,7 @@ class Scanner: while i < len(self) and self[i] in '([{': enter = self[i] if enter == '{': - raise ParseError, "curly braces can't open simple expressions" + raise ParseError("curly braces can't open simple expressions") exit = ENDING_CHARS[enter] i = self.complex(enter, exit, i + 1) + 1 return i @@ -1927,7 +1947,7 @@ class Scanner: i = self.phrase(i) # Make sure we don't end with a trailing dot. 
while i > 0 and self[i - 1] == '.': - i = i - 1 + i -= 1 return i def one(self): @@ -1957,7 +1977,7 @@ class Scanner: elif first in firsts: break else: - raise ParseError, "unknown markup: %s%s" % (self.prefix, first) + raise ParseError("unknown markup: %s%s" % (self.prefix, first)) token = factory(self.prefix, first) try: token.scan(self) @@ -1999,8 +2019,8 @@ class Interpreter: # Tables. - ESCAPE_CODES = {0x00: '0', 0x07: 'a', 0x08: 'b', 0x1b: 'e', 0x0c: 'f', \ - 0x7f: 'h', 0x0a: 'n', 0x0d: 'r', 0x09: 't', 0x0b: 'v', \ + ESCAPE_CODES = {0x00: '0', 0x07: 'a', 0x08: 'b', 0x1b: 'e', 0x0c: 'f', + 0x7f: 'h', 0x0a: 'n', 0x0d: 'r', 0x09: 't', 0x0b: 'v', 0x04: 'z'} ASSIGN_TOKEN_RE = re.compile(r"[_a-zA-Z][_a-zA-Z0-9]*|\(|\)|,") @@ -2017,7 +2037,7 @@ class Interpreter: # Construction, initialization, destruction. - def __init__(self, output=None, argv=None, prefix=DEFAULT_PREFIX, \ + def __init__(self, output=None, argv=None, prefix=DEFAULT_PREFIX, pseudo=None, options=None, globals=None, hooks=None): self.interpreter = self # DEPRECATED # Set up the stream. @@ -2073,8 +2093,8 @@ class Interpreter: self.shutdown() def __repr__(self): - return '<%s pseudomodule/interpreter at 0x%x>' % \ - (self.pseudo, id(self)) + return ('<%s pseudomodule/interpreter at 0x%x>' % + (self.pseudo, id(self))) def ready(self): """Declare the interpreter ready for normal operations.""" @@ -2086,16 +2106,16 @@ class Interpreter: self.globals = {} # Make sure that there is no collision between two interpreters' # globals. - if self.globals.has_key(self.pseudo): + if self.pseudo in self.globals: if self.globals[self.pseudo] is not self: - raise Error, "interpreter globals collision" + raise Error("interpreter globals collision") self.globals[self.pseudo] = self def unfix(self): """Remove the pseudomodule (if present) from the globals.""" UNWANTED_KEYS = [self.pseudo, '__builtins__'] for unwantedKey in UNWANTED_KEYS: - if self.globals.has_key(unwantedKey): + if unwantedKey in self.globals: del self.globals[unwantedKey] def update(self, other): @@ -2186,7 +2206,7 @@ class Interpreter: def include(self, fileOrFilename, locals=None): """Do an include pass on a file or filename.""" - if type(fileOrFilename) is types.StringType: + if isinstance(fileOrFilename, (_str, _unicode)): # Either it's a string representing a filename ... 
filename = fileOrFilename name = filename @@ -2201,7 +2221,7 @@ class Interpreter: def expand(self, data, locals=None): """Do an explicit expansion on a subordinate stream.""" - outFile = StringIO.StringIO() + outFile = StringIO() stream = Stream(outFile) self.invoke('beforeExpand', string=data, locals=locals) self.streams.push(stream) @@ -2229,7 +2249,7 @@ class Interpreter: except TransientParseError: pass result.append(data[i:]) - result = string.join(result, '') + result = ''.join(result) self.invoke('afterQuote', result=result) return result @@ -2241,8 +2261,8 @@ class Interpreter: for char in data: if char < ' ' or char > '~': charOrd = ord(char) - if Interpreter.ESCAPE_CODES.has_key(charOrd): - result.append(self.prefix + '\\' + \ + if charOrd in Interpreter.ESCAPE_CODES: + result.append(self.prefix + '\\' + Interpreter.ESCAPE_CODES[charOrd]) else: result.append(self.prefix + '\\x%02x' % charOrd) @@ -2250,7 +2270,7 @@ class Interpreter: result.append(self.prefix + '\\' + char) else: result.append(char) - result = string.join(result, '') + result = ''.join(result) self.invoke('afterEscape', result=result) return result @@ -2260,21 +2280,23 @@ class Interpreter: """Wrap around an application of a callable and handle errors. Return whether no error occurred.""" try: - apply(callable, args) + callable(*args) self.reset() return True - except KeyboardInterrupt, e: + except KeyboardInterrupt: # Handle keyboard interrupts specially: we should always exit # from these. + e = sys.exc_info()[1] self.fail(e, True) - except Exception, e: + except Exception: # A standard exception (other than a keyboard interrupt). + e = sys.exc_info()[1] self.fail(e) except: # If we get here, then either it's an exception not derived from # Exception or it's a string exception, so get the error type # from the sys module. - e = sys.exc_type + e = sys.exc_info()[1] self.fail(e) # An error occurred if we leak through to here, so do cleanup. self.reset() @@ -2326,7 +2348,7 @@ class Interpreter: if self.options.get(BANGPATH_OPT, True) and self.prefix: # Replace a bangpath at the beginning of the first line # with an EmPy comment. - if string.find(line, BANGPATH) == 0: + if line.startswith(BANGPATH): line = self.prefix + '#' + line[2:] first = False if line: @@ -2343,7 +2365,7 @@ class Interpreter: chunkSize = DEFAULT_CHUNK_SIZE context = Context(name, units='bytes') self.contexts.push(context) - self.invoke('beforeBinary', name=name, file=file, \ + self.invoke('beforeBinary', name=name, file=file, chunkSize=chunkSize, locals=locals) scanner = Scanner(self.prefix) done = False @@ -2405,9 +2427,9 @@ class Interpreter: result = [] stack = [result] for garbage in self.ASSIGN_TOKEN_RE.split(name): - garbage = string.strip(garbage) + garbage = garbage.strip() if garbage: - raise ParseError, "unexpected assignment token: '%s'" % garbage + raise ParseError("unexpected assignment token: '%s'" % garbage) tokens = self.ASSIGN_TOKEN_RE.findall(name) # While processing, put a None token at the start of any list in which # commas actually appear. 
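One less mechanical change in the Scanner hunks earlier in this file deserves a note: Python 3 dropped `__getslice__`, so every slice now reaches `__getitem__` as a `slice` object, which is why the patch adds an `isinstance(index, slice)` branch there. A small sketch of that pattern with a made-up buffer class (not the patch's Scanner):

class Window(object):
    """Read-only view over a string, starting at an internal pointer."""
    def __init__(self, buffer, pointer=0):
        self.buffer = buffer
        self.pointer = pointer

    def __len__(self):
        return len(self.buffer) - self.pointer

    def __getitem__(self, index):
        if isinstance(index, slice):          # Python 3 (and extended slicing on 2.x)
            start = 0 if index.start is None else index.start
            stop = len(self) if index.stop is None else min(index.stop, len(self))
            return self.buffer[self.pointer + start:self.pointer + stop]
        return self.buffer[self.pointer + index]

    def __getslice__(self, start, stop):      # still called for w[i:j] on Python 2
        return self.__getitem__(slice(start, stop))

w = Window('abcdef', pointer=2)
assert w[0] == 'c' and w[1:3] == 'de' and w[:] == 'cdef'
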
@@ -2458,12 +2480,12 @@ class Interpreter: try: values = tuple(values) except TypeError: - raise TypeError, "unpack non-sequence" + raise TypeError("unpack non-sequence") if len(names) != len(values): - raise ValueError, "unpack tuple of wrong size" + raise ValueError("unpack tuple of wrong size") for i in range(len(names)): name = names[i] - if type(name) is types.StringType: + if isinstance(name, _str) or isinstance(name, _unicode): self.atomic(name, values[i], locals) else: self.multi(name, values[i], locals) @@ -2474,7 +2496,7 @@ class Interpreter: left = self.tokenize(name) # The return value of tokenize can either be a string or a list of # (lists of) strings. - if type(left) is types.StringType: + if isinstance(left, _str) or isinstance(left, _unicode): self.atomic(left, value, locals) else: self.multi(left, value, locals) @@ -2492,11 +2514,11 @@ class Interpreter: self.invoke('beforeClause', catch=catch, locals=locals) if catch is None: exceptionCode, variable = None, None - elif string.find(catch, ',') >= 0: - exceptionCode, variable = string.split(string.strip(catch), ',', 1) - variable = string.strip(variable) + elif catch.find(',') >= 0: + exceptionCode, variable = catch.strip().split(',', 1) + variable = variable.strip() else: - exceptionCode, variable = string.strip(catch), None + exceptionCode, variable = catch.strip(), None if not exceptionCode: exception = Exception else: @@ -2517,17 +2539,18 @@ class Interpreter: def defined(self, name, locals=None): """Return a Boolean indicating whether or not the name is defined either in the locals or the globals.""" - self.invoke('beforeDefined', name=name, local=local) + self.invoke('beforeDefined', name=name, locals=locals) if locals is not None: - if locals.has_key(name): + if name in locals: result = True else: result = False - elif self.globals.has_key(name): + elif name in self.globals: result = True else: result = False self.invoke('afterDefined', result=result) + return result def literal(self, text): """Process a string literal.""" @@ -2543,7 +2566,7 @@ class Interpreter: if expression in ('0', 'False'): return False self.push() try: - self.invoke('beforeEvaluate', \ + self.invoke('beforeEvaluate', expression=expression, locals=locals) if locals is not None: result = eval(expression, self.globals, locals) @@ -2559,20 +2582,17 @@ class Interpreter: # If there are any carriage returns (as opposed to linefeeds/newlines) # in the statements code, then remove them. Even on DOS/Windows # platforms, - if string.find(statements, '\r') >= 0: - statements = string.replace(statements, '\r', '') + if statements.find('\r') >= 0: + statements = statements.replace('\r', '') # If there are no newlines in the statements code, then strip any # leading or trailing whitespace. 
- if string.find(statements, '\n') < 0: - statements = string.strip(statements) + if statements.find('\n') < 0: + statements = statements.strip() self.push() try: - self.invoke('beforeExecute', \ + self.invoke('beforeExecute', statements=statements, locals=locals) - if locals is not None: - exec statements in self.globals, locals - else: - exec statements in self.globals + _exec(statements, self.globals, locals) self.invoke('afterExecute') finally: self.pop() @@ -2582,13 +2602,10 @@ class Interpreter: entered into the Python interactive interpreter.""" self.push() try: - self.invoke('beforeSingle', \ + self.invoke('beforeSingle', source=source, locals=locals) code = compile(source, '', 'single') - if locals is not None: - exec code in self.globals, locals - else: - exec code in self.globals + _exec(code, self.globals, locals) self.invoke('afterSingle') finally: self.pop() @@ -2620,7 +2637,7 @@ class Interpreter: hook.push() try: method = getattr(hook, _name) - apply(method, (), keywords) + method(*(), **keywords) finally: hook.pop() @@ -2673,7 +2690,7 @@ class Interpreter: # installed it before ... if Interpreter._wasProxyInstalled: # ... and if so, we have a proxy problem. - raise Error, "interpreter stdout proxy lost" + raise Error("interpreter stdout proxy lost") else: # Otherwise, install the proxy and set the flag. sys.stdout = ProxyFile(sys.stdout) @@ -2779,7 +2796,7 @@ class Interpreter: def invokeHook(self, _name, **keywords): """Manually invoke a hook.""" - apply(self.invoke, (_name,), keywords) + self.invoke(*(_name,), **keywords) # Callbacks. @@ -2799,7 +2816,7 @@ class Interpreter: """Invoke the callback.""" if self.callback is None: if self.options.get(CALLBACK_OPT, False): - raise Error, "custom markup invoked with no defined callback" + raise Error("custom markup invoked with no defined callback") else: self.callback(contents) @@ -2809,7 +2826,7 @@ class Interpreter: """Flatten the contents of the pseudo-module into the globals namespace.""" if keys is None: - keys = self.__dict__.keys() + self.__class__.__dict__.keys() + keys = list(self.__dict__.keys()) + list(self.__class__.__dict__.keys()) dict = {} for key in keys: # The pseudomodule is really a class instance, so we need to @@ -2878,8 +2895,7 @@ class Interpreter: def getAllDiversions(self): """Get the names of all existing diversions.""" - names = self.stream().diversions.keys() - names.sort() + names = sorted(self.stream().diversions.keys()) return names # Filter. @@ -2939,7 +2955,7 @@ class Processor: self.documents = {} def scan(self, basename, extensions=DEFAULT_EMPY_EXTENSIONS): - if type(extensions) is types.StringType: + if isinstance(extensions, _str): extensions = (extensions,) def _noCriteria(x): return True @@ -2962,7 +2978,7 @@ class Processor: if depth <= 0: return else: - depth = depth - 1 + depth -= 1 filenames = os.listdir(basename) for filename in filenames: pathname = os.path.join(basename, filename) @@ -2987,7 +3003,7 @@ class Processor: match = self.SIGNIFICATOR_RE.search(line) if match: key, valueS = match.groups() - valueS = string.strip(valueS) + valueS = valueS.strip() if valueS: value = eval(valueS) else: @@ -2995,7 +3011,7 @@ class Processor: document.significators[key] = value -def expand(_data, _globals=None, \ +def expand(_data, _globals=None, _argv=None, _prefix=DEFAULT_PREFIX, _pseudo=None, _options=None, \ **_locals): """Do an atomic expansion of the given source data, creating and @@ -3007,8 +3023,8 @@ def expand(_data, _globals=None, \ # dictionary at all. 
_locals = None output = NullFile() - interpreter = Interpreter(output, argv=_argv, prefix=_prefix, \ - pseudo=_pseudo, options=_options, \ + interpreter = Interpreter(output, argv=_argv, prefix=_prefix, + pseudo=_pseudo, options=_options, globals=_globals) if interpreter.options.get(OVERRIDE_OPT, True): oldStdout = sys.stdout @@ -3026,7 +3042,7 @@ def environment(name, default=None): """Get data from the current environment. If the default is True or False, then presume that we're only interested in the existence or non-existence of the environment variable.""" - if os.environ.has_key(name): + if name in os.environ: # Do the True/False test by value for future compatibility. if default == False or default == True: return True @@ -3072,8 +3088,7 @@ Welcome to EmPy version %s.""" % (programName, __version__)) warn("Valid escape sequences are:") info(ESCAPE_INFO) warn() - warn("The %s pseudomodule contains the following attributes:" % \ - DEFAULT_PSEUDOMODULE_NAME) + warn("The %s pseudomodule contains the following attributes:" % DEFAULT_PSEUDOMODULE_NAME) info(PSEUDOMODULE_INFO) warn() warn("The following environment variables are recognized:") @@ -3109,7 +3124,7 @@ def invoke(args): _pauseAtEnd = False _relativePath = False if _extraArguments is not None: - _extraArguments = string.split(_extraArguments) + _extraArguments = _extraArguments.split() args = _extraArguments + args # Parse the arguments. pairs, remainder = getopt.getopt(args, 'VhHvkp:m:frino:a:buBP:I:D:E:F:', ['version', 'help', 'extended-help', 'verbose', 'null-hook', 'suppress-errors', 'prefix=', 'no-prefix', 'module=', 'flatten', 'raw-errors', 'interactive', 'no-override-stdout', 'binary', 'chunk-size=', 'output=' 'append=', 'preprocess=', 'import=', 'define=', 'execute=', 'execute-file=', 'buffered-output', 'pause-at-end', 'relative-path', 'no-callback-error', 'no-bangpath-processing', 'unicode', 'unicode-encoding=', 'unicode-input-encoding=', 'unicode-output-encoding=', 'unicode-errors=', 'unicode-input-errors=', 'unicode-output-errors=']) @@ -3159,8 +3174,8 @@ def invoke(args): elif option in ('-P', '--preprocess'): _preprocessing.append(('pre', argument)) elif option in ('-I', '--import'): - for module in string.split(argument, ','): - module = string.strip(module) + for module in argument.split(','): + module = module.strip() _preprocessing.append(('import', module)) elif option in ('-D', '--define'): _preprocessing.append(('define', argument)) @@ -3191,31 +3206,31 @@ def invoke(args): elif option in ('--unicode-output-errors',): _unicodeOutputErrors = argument # Set up the Unicode subsystem if required. - if _unicode or \ - _unicodeInputEncoding or _unicodeOutputEncoding or \ - _unicodeInputErrors or _unicodeOutputErrors: - theSubsystem.initialize(_unicodeInputEncoding, \ - _unicodeOutputEncoding, \ + if (_unicode or + _unicodeInputEncoding or _unicodeOutputEncoding or + _unicodeInputErrors or _unicodeOutputErrors): + theSubsystem.initialize(_unicodeInputEncoding, + _unicodeOutputEncoding, _unicodeInputErrors, _unicodeOutputErrors) # Now initialize the output file if something has already been selected. if _output is not None: - _output = apply(AbstractFile, _output) + _output = AbstractFile(*_output) # Set up the main filename and the argument. if not remainder: remainder.append('-') filename, arguments = remainder[0], remainder[1:] # Set up the interpreter. 
if _options[BUFFERED_OPT] and _output is None: - raise ValueError, "-b only makes sense with -o or -a arguments" + raise ValueError("-b only makes sense with -o or -a arguments") if _prefix == 'None': _prefix = None - if _prefix and type(_prefix) is types.StringType and len(_prefix) != 1: - raise Error, "prefix must be single-character string" - interpreter = Interpreter(output=_output, \ - argv=remainder, \ - prefix=_prefix, \ - pseudo=_pseudo, \ - options=_options, \ + if (_prefix and isinstance(_prefix, _str) and len(_prefix) != 1): + raise Error("prefix must be single-character string") + interpreter = Interpreter(output=_output, + argv=remainder, + prefix=_prefix, + pseudo=_pseudo, + options=_options, hooks=_hooks) try: # Execute command-line statements. @@ -3227,7 +3242,7 @@ def invoke(args): name = thing elif which == 'define': command = interpreter.string - if string.find(thing, '=') >= 0: + if thing.find('=') >= 0: target = '%s{%s}' % (_prefix, thing) else: target = '%s{%s = None}' % (_prefix, thing) @@ -3239,7 +3254,7 @@ def invoke(args): elif which == 'file': command = interpreter.string name = '' % (i, thing) - target = '%s{execfile("""%s""")}' % (_prefix, thing) + target = '%s{exec(open("""%s""").read())}' % (_prefix, thing) elif which == 'import': command = interpreter.string name = '' % i @@ -3247,7 +3262,7 @@ def invoke(args): else: assert 0 interpreter.wrap(command, (target, name)) - i = i + 1 + i += 1 # Now process the primary file. interpreter.ready() if filename == '-': @@ -3277,7 +3292,7 @@ def invoke(args): # Finally, if we should pause at the end, do it. if _pauseAtEnd: try: - raw_input() + _input() except EOFError: pass diff --git a/src/finalize.py b/src/finalize.py index 7ccb7f0..2efc033 100644 --- a/src/finalize.py +++ b/src/finalize.py @@ -30,8 +30,8 @@ # etc. # +from __future__ import print_function import sys, string -from types import StringType import mk, errors, config, utils @@ -76,7 +76,7 @@ def finalEvaluation(outputVarsOnly=1): list.append((mk.vars,v,None)) else: for v in mk.vars: - if type(mk.vars[v]) is StringType: + if type(mk.vars[v]) is str: if '$' in mk.vars[v]: list.append((mk.vars,v,None)) @@ -88,7 +88,7 @@ def finalEvaluation(outputVarsOnly=1): else: for t in mk.targets.values(): for v in t.vars: - if type(t.vars[v]) is StringType: + if type(t.vars[v]) is str: if '$' in t.vars[v]: list.append((t.vars,v,t)) @@ -107,7 +107,7 @@ def finalEvaluation(outputVarsOnly=1): mk.__resetUsageTracker(reset_coverage=0) try: new = mk.evalExpr(expr, target=target) - except Exception, e: + except Exception as e: raise errors.Error("failed to set variable '%s' to value '%s': %s" % (obj, expr, e)) if expr != new: if dict == None: obj.value = new @@ -359,15 +359,15 @@ def replaceEscapeSequences(): return s.replace('$', '$') if config.verbose: - print 'replacing escape sequences' + print('replacing escape sequences') for v in mk.vars: - if type(mk.vars[v]) is StringType: + if type(mk.vars[v]) is str: mk.vars[v] = _repl(mk.vars[v]) for v in mk.make_vars: mk.make_vars[v] = _repl(mk.make_vars[v]) for t in mk.targets.values(): for v in t.vars: - if type(t.vars[v]) is StringType: + if type(t.vars[v]) is str: t.vars[v] = _repl(t.vars[v]) for o in mk.options.values(): if o.default != None: diff --git a/src/flatten.py b/src/flatten.py index 2f5fa90..d0a6c4a 100644 --- a/src/flatten.py +++ b/src/flatten.py @@ -30,6 +30,7 @@ # variables, such as MSVC project files. 
 #
+from __future__ import print_function
 import config, copy
 import errors, mk
 import finalize
@@ -50,8 +51,8 @@ def makeConfigs():
             # the option is irrelevant because we can simply substitute
             # default value:
             if config.verbose:
-                print "using default value '%s' for option '%s'" % \
-                      (option.default, option.name)
+                print("using default value '%s' for option '%s'" % \
+                      (option.default, option.name))
             return cfgs
     out = []
     name = option.name
@@ -211,9 +212,9 @@ def flatten():
        cfgs = [{}]
 
    if config.verbose:
-        print '%i configurations' % len(cfgs)
+        print('%i configurations' % len(cfgs))
    if config.debug:
-        for c in cfgs: print '[dbg] %s' % c
+        for c in cfgs: print('[dbg] %s' % c)
 
    # remove options and conditional variables:
    mk.__vars_opt = {}
diff --git a/src/mk.py b/src/mk.py
index 79f6312..2ce0bfc 100644
--- a/src/mk.py
+++ b/src/mk.py
@@ -26,6 +26,7 @@
 # Makefile variables, conditions etc. and evaluation code are located here
 #
 
+from __future__ import print_function
 import utils, errors, config, containers
 import bkl_c
 from utils import *
@@ -84,7 +85,7 @@ class Option:
            try:
                self.default = evalExpr(self.default, use_options=0)
-            except NameError, err:
+            except NameError as err:
                raise errors.Error("can't use options or conditional variables in default value of option '%s' (%s)" % (self.name, err),
                                   context=self.context)
 
@@ -93,7 +94,7 @@ class Option:
        if self.values != None and self.default not in self.values:
            # unless the user explicitely wanted to avoid this kind of check:
            if not self.forceDefault:
-                print self.context
+                print(self.context)
                raise errors.Error("default value '%s' for option '%s' is not among allowed values (%s)" % (self.default, self.name, ','.join(self.values)),
                                   context=self.context)
@@ -266,7 +267,7 @@ def setVar(name, value, eval=1, target=None, add_dict=None, store_in=None,
            else:
                delCondVar(name)
            if config.debug:
-                print "[dbg] overwriting option/condvar %s" % name
+                print("[dbg] overwriting option/condvar %s" % name)
 
    # if this is files list, we certainly don't want to use any other
    # separators than single space; but don't do this if there's an embedded
@@ -277,7 +278,7 @@ def setVar(name, value, eval=1, target=None, add_dict=None, store_in=None,
    if eval:
        try:
            v = evalExpr(value, target=target, add_dict=add_dict)
-        except Exception,e:
+        except Exception as e:
            raise errors.Error("failed to set variable '%s' to value '%s': %s" % (name, value, e))
    else:
        v = value
@@ -391,8 +392,8 @@ def removeCondVarDependencyFromCondition(condvar, condvarvalue):
    # notify the user about this conversion
    if config.debug:
-        print "[dbg] the '%s==%s' expression has been converted to '%s'" \
-              % (condvar.name, condvarvalue, converted_cond)
+        print("[dbg] the '%s==%s' expression has been converted to '%s'" \
+              % (condvar.name, condvarvalue, converted_cond))
 
    return condexpr_list
 
 def makeCondition(cond_str):
@@ -484,8 +485,6 @@ def __resetUsageTracker(reset_coverage):
 
 __curNamespace = {}
 
-False, True = 0, 1
-
 # cache for compiled Python expressions:
 __pyExprPrecompiled = {}
@@ -511,7 +510,7 @@ def __evalPyExpr(nothing, expr, use_options=1, target=None, add_dict=None):
                    __usageTracker.map[expr] = 1
                else:
                    __usageTracker.vars += 1
-            return d[expr]
+            return d[expr].encode('ascii')
 
    if __trackUsage:
        __usageTracker.pyexprs += 1
@@ -537,9 +536,12 @@ def __evalPyExpr(nothing, expr, use_options=1, target=None, add_dict=None):
    if val == True: val = 1
    elif val == False: val = 0
    __curNamespace = oldNS
-    return str(val)
+    return str(val).encode('ascii')
 
-__doEvalExpr = bkl_c.doEvalExpr
+def __doEvalExpr(expr, valCallb, textCallb, moreArgs, use_options, target,
+                 add_dict):
+    return bkl_c.doEvalExpr(expr.encode('ascii'), valCallb, textCallb, moreArgs,
+                            use_options, target, add_dict).decode('ascii')
 
 def evalExpr(e, use_options=1, target=None, add_dict=None):
    try:
@@ -548,9 +550,9 @@ def evalExpr(e, use_options=1, target=None, add_dict=None):
                            use_options,
                            target,
                            add_dict)
-    except KeyError, err:
+    except KeyError as err:
        raise RuntimeError("undefined variable %s" % err)
-    except errors.ErrorBase, err:
+    except errors.ErrorBase as err:
        raise RuntimeError(err.getErrorMessage())
@@ -568,7 +570,7 @@ def importPyModule(modname):
    try:
        exec('import utils.%s' % modname, globals())
        if config.verbose:
-            print 'imported python module utils.%s' % modname
+            print('imported python module utils.%s' % modname)
        if config.track_deps:
            __recordDeps('utils/%s' % modname)
        imported = True
@@ -578,7 +580,7 @@ def importPyModule(modname):
    try:
        exec('import %s' % modname, globals())
        if config.verbose:
-            print 'imported python module %s' % modname
+            print('imported python module %s' % modname)
        if config.track_deps:
            __recordDeps(modname)
        imported = True
@@ -586,11 +588,11 @@ def importPyModule(modname):
        pass
 
    if config.debug:
-        print '[dbg] --- after importing module %s --' % modname
-        print '[dbg] sys.path=%s' % sys.path
+        print('[dbg] --- after importing module %s --' % modname)
+        print('[dbg] sys.path=%s' % sys.path)
        if modname in sys.modules:
-            print '[dbg] sys.modules[%s]=%s' % (modname,sys.modules[modname])
+            print('[dbg] sys.modules[%s]=%s' % (modname,sys.modules[modname]))
        else:
-            print '[dbg] module not loaded!'
+            print('[dbg] module not loaded!')
 
    return imported
diff --git a/src/mk_dump.py b/src/mk_dump.py
index 8ec32d8..091cb01 100644
--- a/src/mk_dump.py
+++ b/src/mk_dump.py
@@ -26,29 +26,30 @@
 # Dumps parsed bakefiles content
 #
 
+from __future__ import print_function
 import mk
 
 def dumpMakefile():
-    print '\nVariables:'
+    print('\nVariables:')
    for v in mk.vars:
-        print ' %-30s = %s' % (v, mk.vars[v])
+        print(' %-30s = %s' % (v, mk.vars[v]))
 
-    print '\nOptions:'
+    print('\nOptions:')
    for o in mk.options.values():
-        print ' %-30s (default:%s,values:%s)' % (o.name, o.default, o.values)
+        print(' %-30s (default:%s,values:%s)' % (o.name, o.default, o.values))
 
-    print '\nConditions:'
+    print('\nConditions:')
    for c in mk.conditions.values():
-        print ' %-30s (%s)' % (c.name,c.exprs)
+        print(' %-30s (%s)' % (c.name,c.exprs))
 
-    print '\nConditional variables:'
+    print('\nConditional variables:')
    for v in mk.cond_vars.values():
-        print ' %-30s' % v.name
+        print(' %-30s' % v.name)
        for vv in v.values:
-            print ' if %-25s = %s' % (vv.cond.name, vv.value)
+            print(' if %-25s = %s' % (vv.cond.name, vv.value))
 
-    print '\nTargets:'
+    print('\nTargets:')
    for t in mk.targets.values():
-        print ' %s %s' % (t.type, t.id)
+        print(' %s %s' % (t.type, t.id))
        for v in t.vars:
-            print ' %-28s = %s' % (v, t.vars[v])
+            print(' %-28s = %s' % (v, t.vars[v]))
diff --git a/src/portautils.py b/src/portautils.py
index 290438c..8d7d0a7 100644
--- a/src/portautils.py
+++ b/src/portautils.py
@@ -64,7 +64,7 @@ if os.name == 'nt':
            hfile = win32file._get_osfhandle(file.fileno())
            win32file.LockFileEx(hfile, win32con.LOCKFILE_EXCLUSIVE_LOCK,
                                 0, 0x7fffffff, __overlapped)
-        except pywintypes.error, e:
+        except pywintypes.error as e:
            # err 120 is unimplemented call, happens on win9x:
            if e.args[0] != 120:
                raise e
@@ -73,7 +73,7 @@ if os.name == 'nt':
        try:
            hfile = win32file._get_osfhandle(file.fileno())
            win32file.UnlockFileEx(hfile, 0, 0x7fffffff, __overlapped)
-        except pywintypes.error, e:
+        except pywintypes.error as e:
            # err 120 is unimplemented call, happens on win9x:
            if e.args[0] != 120:
                raise e
diff --git a/src/reader.py b/src/reader.py
index ae5799f..6c44251 100644
--- a/src/reader.py
+++ b/src/reader.py
@@ -26,6 +27,7 @@
 # Reading and interpreting the makefiles
 #
 
+from __future__ import print_function
 import string, sys, copy, os, os.path
 import xmlparser
 import mk
@@ -36,13 +37,10 @@ import config
 import finalize
 import dependencies
 
-def reraise():
-    raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
-
 def evalConstExpr(e, str, target=None, add_dict=None):
    try:
        return mk.evalExpr(str, use_options=0, target=target, add_dict=add_dict)
-    except NameError, err:
+    except NameError as err:
        raise ReaderError(e, "can't use options or conditional variables in this context (%s)" % err)
@@ -156,12 +154,12 @@ def handleSet(e, target=None, add_dict=None):
                # Condition never met when generating this target:
                if typ == '0':
                    if config.debug:
-                        print "[dbg] removing never-met condition '%s' for variable '%s'" % (condstr, name)
+                        print("[dbg] removing never-met condition '%s' for variable '%s'" % (condstr, name))
                    continue
                # Condition always met:
                elif typ == '1':
                    if config.debug:
-                        print "[dbg] condition '%s' for variable '%s' is always met" % (condstr, name)
+                        print("[dbg] condition '%s' for variable '%s' is always met" % (condstr, name))
                    noValueSet = 0
                    isCond = 0
                    value = e_if.value
@@ -462,8 +460,8 @@ def _extractTargetNodes(parent, list, target, tags, index):
            if len(index[name]) > 0:
                if config.debug:
                    n2 = index[name][0]
-                    print '[dbg] (thrown away <%s> @%s in favor of @%s)' % \
-                          (name, n2.node.location(), n.node.location())
+                    print('[dbg] (thrown away <%s> @%s in favor of @%s)' % \
+                          (name, n2.node.location(), n.node.location()))
                _removeNode(index[name][0])
            # else: this can happen if _removeNode was called recursively
            index[name] = [n]
@@ -501,9 +499,9 @@ def _reorderTargetNodes(node, index):
 
    def _reorderNodes(first, second):
        if config.debug:
-            print '[dbg] (reordering <%s> @%s <=> <%s> @%s)' % \
+            print('[dbg] (reordering <%s> @%s <=> <%s> @%s)' % \
                  (first.node.name, first.node.location(),
-                   second.node.name, second.node.location())
+                   second.node.name, second.node.location()))
 
        # find nearest shared parent:
        parents1 = []
@@ -546,7 +544,7 @@ def _extractDictForTag(e, target, dict):
    # $(value) expands to the thing passed as tag's text value:
    try:
        dict2 = {'value' : mk.evalExpr(e.value, target=target, add_dict=dict)}
-    except Exception, err:
+    except Exception as err:
        raise errors.Error("incorrect argument value '%s': %s" %
                           (e.value, err))
@@ -556,7 +554,7 @@ def _extractDictForTag(e, target, dict):
    for a in e.props:
        try:
            attr[a] = mk.evalExpr(e.props[a], target=target, add_dict=dict)
-        except Exception, err:
+        except Exception as err:
            raise errors.Error("incorrect value '%s' of attribute '%s': %s" %
                               (e.props[a], a, err))
    dict2['attributes'] = attr
@@ -605,9 +603,9 @@ def _processTargetNodes(node, target, tags, dict):
            return 1
 
    if config.debug:
-        print '[dbg] -----------------------------------------'
-        print '[dbg] * tags tree for target %s:' % target.id
-        print '[dbg] -----------------------------------------'
+        print('[dbg] -----------------------------------------')
+        print('[dbg] * tags tree for target %s:' % target.id)
+        print('[dbg] -----------------------------------------')
 
    root = TgtCmdNode(target, None, TgtCmdNode.TARGET, node)
    root.dict = dict
@@ -626,7 +624,7 @@ def _processTargetNodes(node, target, tags, dict):
            if len(n.node.props) > 0:
                for p in n.node.props:
                    props += " %s='%s'" % (p, n.node.props[p])
-            print '[dbg] %s<%s%s> %s' % (indent, n.node.name, props, value)
+            print('[dbg] %s<%s%s> %s' % (indent, n.node.name, props, value))
            for c in n.children:
                dumpTgtNode(c, level+1)
        dumpTgtNode(root, -1)
@@ -882,28 +880,26 @@ availableFiles = []
 def buildModulesList():
    class ModuleInfo: pass
-    def visit(basedir, dirname, names):
-        dir = dirname[len(basedir)+1:]
-        if dir != '':
-            dircomp = dir.split(os.sep)
-        else:
-            dircomp = []
-        for n in names:
-            ext =os.path.splitext(n)[1]
-            if ext != '.bakefile' and ext != '.bkl': continue
-            i = ModuleInfo()
-            i.file = os.path.join(dirname,n)
-            i.modules = dircomp + os.path.splitext(n)[0].split('-')
-            availableFiles.append(i)
    for p in config.searchPath:
-        os.path.walk(p, visit, p)
+        for dirname, _, names in os.walk(p):
+            dir = dirname[len(p)+1:]
+            if dir != '':
+                dircomp = dir.split(os.sep)
+            else:
+                dircomp = []
+            for n in names:
+                ext = os.path.splitext(n)[1]
+                if ext != '.bakefile' and ext != '.bkl': continue
+                i = ModuleInfo()
+                i.file = os.path.join(dirname, n)
+                i.modules = dircomp + os.path.splitext(n)[0].split('-')
+                availableFiles.append(i)
 
 def loadModule(m):
    if m in loadedModules: return
-    if config.verbose: print "loading module '%s'..." % m
+    if config.verbose: print("loading module '%s'..." % m)
    loadedModules.append(m)
 
    # set USING_ variable:
@@ -1012,11 +1008,11 @@ def handleEcho(e, target=None, add_dict=None):
        level = 'normal'
 
    if level == 'normal':
-        print text
+        print(text)
    elif level == 'verbose' and config.verbose:
-        print text
+        print(text)
    elif level == 'debug' and config.debug:
-        print text
+        print(text)
    elif level == 'warning':
        # FIXME: DEPRECATED (since 0.2.3)
        _printWarning(None, text)
@@ -1086,7 +1082,7 @@ def __doProcess(file=None, strdata=None, xmldata=None):
    try:
        processNodes(m.children)
    except ReaderError:
-        reraise()
+        raise
    # FIXME: enable this code when finished programming:
    #except Exception, ex:
    #    raise ReaderError(e, ex)
@@ -1101,11 +1097,11 @@ def processFile(filename, onlyOnce=False):
    filename = os.path.abspath(filename)
    if onlyOnce and filename in includedFiles:
        if config.verbose:
-            print "file %s already included, skipping..." % filename
+            print("file %s already included, skipping..." % filename)
        return
    includedFiles.append(filename)
    if config.verbose:
-        print 'loading %s...' % filename
+        print('loading %s...' % filename)
    if config.track_deps:
        dependencies.addDependency(mk.vars['INPUT_FILE'], config.format, filename)
@@ -1158,8 +1154,8 @@ def read(filename):
        checkTagDefinitions()
        finalize.finalize()
        return 1
-    except errors.ErrorBase, e:
+    except errors.ErrorBase as e:
        if config.debug:
-            reraise()
+            raise
        sys.stderr.write(str(e))
        return 0
diff --git a/src/utils.py b/src/utils.py
index a217e26..c04699c 100644
--- a/src/utils.py
+++ b/src/utils.py
@@ -26,6 +27,7 @@
 # Misc utility functions for use in Bakefiles
 #
 
+from __future__ import print_function
 import sys, os, os.path, string, glob
 if sys.version_info < (2,4):
     from sets import Set as set
@@ -171,7 +172,7 @@ def substitute2(str, callback, desc=None, cond=None, hints='', caller=None):
                    var.add(v.cond, v.value)
                else:
                    var.add(v.cond, callback(cond2, v.value))
-            return '$(%s)' % var.name
+            return ('$(%s)' % var.name).encode('ascii')
 
        if expr in mk.options and mk.options[expr].values != None:
            opt = mk.options[expr]
@@ -185,19 +186,19 @@ def substitute2(str, callback, desc=None, cond=None, hints='', caller=None):
                else:
                    if len(v) == 0 or v.isspace(): var.add(cond, v)
                    else: var.add(cond, callback(cond2, v))
-            return '$(%s)' % var.name
+            return ('$(%s)' % var.name).encode('ascii')
 
        if expr in __substituteCallbacks:
            for func in __substituteCallbacks[expr]:
                rval = func(expr, callback, caller)
                if rval != None:
-                    return rval
+                    return rval.encode('ascii')
        raise errors.Error("'%s' can't be used in this context, "%expr +
                           "not a conditional variable or option with listed values")
 
    def callbackTxt(cond, expr):
        if len(expr) == 0 or expr.isspace(): return expr
-        return callback(cond, expr)
+        return callback(cond, expr).encode('ascii')
 
    return mk.__doEvalExpr(str, callbackVar, callbackTxt, cond, # moreArgs
@@ -250,7 +251,7 @@ def getPossibleValues(expr):
                    ret += getPossibleValues(v.value)
                else:
                    ret.append(v.value)
-            return ' '.join(ret)
+            return ' '.join(ret).encode('ascii')
 
        if expr in mk.options and mk.options[expr].values != None:
            opt = mk.options[expr]
@@ -259,13 +260,13 @@ def getPossibleValues(expr):
                    ret += getPossibleValues(v)
                else:
                    ret.append(v)
-            return ' '.join(ret)
+            return ' '.join(ret).encode('ascii')
 
        # don't know what else to try, return as-is
-        return expr
+        return expr.encode('ascii')
 
    def callbackTxt(nothing, expr):
-        return expr
+        return expr.encode('ascii')
 
    return mk.__doEvalExpr(expr, callbackVar, callbackTxt, None, 1, None, None).split()
@@ -489,8 +490,8 @@ def sources2objects(sources, target, ext, objSuffix=''):
        else:
            hardFiles.append(f)
    if config.verbose:
-        print ' making object rules (%i of %i hard)' % \
-              (len(hardFiles), len(hardFiles)+len(easyFiles))
+        print(' making object rules (%i of %i hard)' % \
+              (len(hardFiles), len(hardFiles)+len(easyFiles)))
 
    # there's only one rule for this object file, therefore we don't care
    # about its condition, if any:
@@ -566,12 +567,12 @@ def __certainlyNotEmpty(value):
            # but they default to empty value if the set of its
            # conditions is not exhaustive and so checking all items
            # of its 'values' members is not enough, we'd have to verify
-            return ''
+            return ''.encode('ascii')
 
    def textCb(helper, txt):
        if len(txt) > 0: helper.ok = True
-        return ''
+        return ''.encode('ascii')
 
    mk.__doEvalExpr(value, varCb, textCb,
                    helper, # extra argument passed to callbacks
@@ -861,8 +862,8 @@ def fileList(pathlist):
        files.sort()
 
        if config.debug:
-            print "fileList('%s'): matches for '%s' pattern found: '%s'" % \
-                  (path, os.path.normpath(p), ' '.join(files))
+            print("fileList('%s'): matches for '%s' pattern found: '%s'" % \
+                  (path, os.path.normpath(p), ' '.join(files)))
 
        return ' '.join(files)
@@ -871,7 +872,7 @@ def fileList(pathlist):
    elif isinstance(pathlist, list):
        ret = ' '.join([__fileList(path) for path in pathlist])
        if config.debug:
-            print "fileList(%s): returned '%s'" % (pathlist, ret.strip())
+            print("fileList(%s): returned '%s'" % (pathlist, ret.strip()))
        return ret
    else:
        raise errors.Error('fileList() function only accepts a string or a python list of strings as argument')
diff --git a/src/writer.py b/src/writer.py
index a6cb98f..eef20a4 100644
--- a/src/writer.py
+++ b/src/writer.py
@@ -26,10 +26,10 @@
 # Writes parsed bakefile to a makefile
 #
 
+from __future__ import print_function
 import types, copy, sys, os, os.path, string
 import mk, config, errors, dependencies
 import outmethods, portautils
-from types import StringType
 
 mergeBlocks = outmethods.mergeBlocks
 mergeBlocksWithFilelist = outmethods.mergeBlocksWithFilelist
@@ -67,7 +67,7 @@ def __copyMkToVars():
    # Copy variables:
    for v in mk.vars:
        if v == 'targets': continue
-        if type(mk.vars[v]) is StringType:
+        if type(mk.vars[v]) is str:
            dict[v] = mk.vars[v].strip()
        else:
            dict[v] = mk.vars[v]
@@ -88,7 +88,7 @@ def __copyMkToVars():
            elif v == 'distinctConfigs':
                t.distinctConfigs = tar.vars['distinctConfigs']
            else:
-                if type(tar.vars[v]) is StringType:
+                if type(tar.vars[v]) is str:
                    setattr(t, v, tar.vars[v].strip())
                else:
                    setattr(t, v, tar.vars[v])
@@ -236,7 +236,7 @@
    sys.path = oldpath
    global __files
    __files = []
    vars = {}
-    exec code in vars
+    exec(code, vars)
 
 def invoke(writer, file, method):
    if writer.endswith('.empy'):
@@ -250,7 +250,7 @@ def invoke(writer, file, method):
 __output_files = {}
 __output_methods = {}
 def writeFile(filename, data, method = 'replace'):
-    if isinstance(data, types.StringType):
+    if isinstance(data, str):
        data = [x+'\n' for x in data.split('\n')]
    __output_methods[filename] = method
    __output_files[filename] = data
@@ -276,7 +276,7 @@ def _getEolStyle():
    return config.eol
 
 def write():
-    if config.verbose: print 'preparing generator...'
+    if config.verbose: print('preparing generator...')
    global __preparedMkVars, __output_files
    __preparedMkVars = __copyMkToVars()
@@ -284,9 +284,9 @@
    for file, writer, method in config.to_output:
        try:
-            if config.verbose: print 'generating %s...' % file
+            if config.verbose: print('generating %s...' % file)
            invoke(writer, file, method)
-        except errors.Error, e:
+        except errors.Error as e:
            sys.stderr.write(str(e))
            return 0
@@ -327,10 +327,10 @@ def write():
            if changes_f != None:
                changes_f.write('%s\n' % os.path.abspath(file))
            if not config.quiet:
-                print 'writing %s' % file
+                print('writing %s' % file)
        else:
            if not config.quiet:
-                print 'no changes in %s' % file
+                print('no changes in %s' % file)
        if f != None:
            __closeFile(f)
diff --git a/src/xmlparser.py b/src/xmlparser.py
index 097a8a7..0bcce93 100644
--- a/src/xmlparser.py
+++ b/src/xmlparser.py
@@ -199,16 +199,16 @@ def __doParseMinidom(func, src):
        t = handleNode(src, dom.documentElement)
        dom.unlink()
        return t
-    except xml.dom.DOMException, e:
+    except xml.dom.DOMException as e:
        sys.stderr.write("%s: error: %s\n" % (src, e))
        raise ParsingError()
-    except xml.sax.SAXException, e:
+    except xml.sax.SAXException as e:
        sys.stderr.write("%s: error: %s\n" % (src, e))
        raise ParsingError()
-    except xml.parsers.expat.ExpatError, e:
+    except xml.parsers.expat.ExpatError as e:
        sys.stderr.write("%s: error: %s\n" % (src, e))
        raise ParsingError()
-    except IOError, e:
+    except IOError as e:
        sys.stderr.write("%s: error: %s\n" % (src, e))
        raise ParsingError()
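
Note: the hunks above apply the same handful of Python 2-to-3 compatibility idioms over and over. The sketch below is not part of the patch -- the helper names (list_bakefiles, run_script) and the file layout are invented for illustration -- but it collects the recurring replacements in one small, runnable place: print() via the __future__ import, "except ... as e", membership tests instead of dict.has_key(), str methods instead of the string module, os.walk() instead of os.path.walk(), and exec(open(...).read()) instead of execfile().

# Illustration only; mirrors the porting idioms used in the patch above.
from __future__ import print_function   # makes print() work on Python 2.6+ as well

import os
import sys


def list_bakefiles(search_path):
    # os.walk() replaces the removed os.path.walk()/visit() callback pattern.
    found = []
    for p in search_path:
        for dirname, _, names in os.walk(p):
            for n in names:
                if os.path.splitext(n)[1] in ('.bkl', '.bakefile'):
                    found.append(os.path.join(dirname, n))
    return found


def run_script(path, namespace):
    # 'in' replaces os.environ.has_key(); print() replaces the print statement.
    if 'VERBOSE' in os.environ:
        print('running %s...' % path)
    try:
        with open(path) as f:
            exec(f.read(), namespace)          # replaces execfile(path, namespace)
    except IOError as e:                       # replaces 'except IOError, e:'
        sys.stderr.write('failed to run %s: %s\n' % (path, e))
        return 1
    return 0


if __name__ == '__main__':
    modules = ','.join(sys.argv[1:])
    # str.split()/str.strip() replace string.split()/string.strip().
    for m in modules.split(','):
        print(m.strip())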