diff --git a/sepolgen-1.2.2/ChangeLog b/sepolgen-1.2.2/ChangeLog
index a304ab7..bec96bb 100644
--- a/sepolgen-1.2.2/ChangeLog
+++ b/sepolgen-1.2.2/ChangeLog
@@ -1,3 +1,6 @@
+	* Add support for python3, from Robert Kuska.
+	* Add device tree ocontext nodes to Xen policy, from Daniel De Graaf.
+
 1.2.2 2015-02-02
 	* Fix bugs found by hardened gcc flags, from Nicolas Iooss.
 	* Remove unnecessary grammar in interface call param list to fix poor
diff --git a/sepolgen-1.2.2/src/sepolgen/access.py b/sepolgen-1.2.2/src/sepolgen/access.py
index cf13210..60ff4e9 100644
--- a/sepolgen-1.2.2/src/sepolgen/access.py
+++ b/sepolgen-1.2.2/src/sepolgen/access.py
@@ -31,7 +31,9 @@ and sets of that access (AccessVectorSet). These objects are used in Madison
 in a variety of ways, but they are the fundamental representation of access.
 """
 
-import refpolicy
+from . import refpolicy
+from . import util
+
 from selinux import audit2why
 
 def is_idparam(id):
@@ -51,7 +53,7 @@ def is_idparam(id):
     else:
         return False
 
-class AccessVector:
+class AccessVector(util.Comparison):
     """
     An access vector is the basic unit of access in SELinux.
 
@@ -88,6 +90,11 @@ class AccessVector:
             self.audit_msgs = []
             self.type = audit2why.TERULE
             self.data = []
+            self.obj_path = None
+            self.base_type = None
+        # when implementing __eq__, __hash__ is also needed on py2;
+        # if the object is mutable, __hash__ should be None
+        self.__hash__ = None
 
         # The direction of the information flow represented by this
         # access vector - used for matching
@@ -133,6 +140,11 @@ class AccessVector:
         return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
                                         self.obj_class, self.perms.to_space_str())
 
+    def base_file_type(self):
+        # [base type, target type, source type]
+        base_type_array = [self.base_type, self.tgt_type, self.src_type]
+        return base_type_array
+
     def __cmp__(self, other):
         if self.src_type != other.src_type:
             return cmp(self.src_type, other.src_type)
@@ -151,6 +163,19 @@ class AccessVector:
                 return cmp(pa, pb)
         return 0
 
+    def _compare(self, other, method):
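+        # called by util.Comparison's rich-comparison operators; compares
+        # (src_type, tgt_type, obj_class, sorted perms) tuples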
+        try:
+            x = list(self.perms)
+            a = (self.src_type, self.tgt_type, self.obj_class, x)
+            y = list(other.perms)
+            x.sort()
+            y.sort()
+            b = (other.src_type, other.tgt_type, other.obj_class, y)
+            return method(a, b)
+        except (AttributeError, TypeError):
+            # trying to compare to foreign type
+            return NotImplemented
+
 def avrule_to_access_vectors(avrule):
     """Convert an avrule into a list of access vectors.
 
@@ -256,20 +281,23 @@ class AccessVectorSet:
         for av in l:
             self.add_av(AccessVector(av))
 
-    def add(self, src_type, tgt_type, obj_class, perms, audit_msg=None, avc_type=audit2why.TERULE, data=[]):
+    def add(self, src_type, tgt_type, obj_class, perms, obj_path=None,
+            base_type=None, audit_msg=None, avc_type=audit2why.TERULE, data=[]):
         """Add an access vector to the set.
         """
         tgt = self.src.setdefault(src_type, { })
         cls = tgt.setdefault(tgt_type, { })
         
-        if cls.has_key((obj_class, avc_type)):
+        if (obj_class, avc_type) in cls:
             access = cls[obj_class, avc_type]
         else:
             access = AccessVector()
             access.src_type = src_type
             access.tgt_type = tgt_type
             access.obj_class = obj_class
+            access.obj_path = obj_path
             access.data = data
+            access.base_type = base_type
             access.type = avc_type
             cls[obj_class, avc_type] = access
 
@@ -293,7 +321,7 @@ def avs_extract_types(avs):
 def avs_extract_obj_perms(avs):
     perms = { }
     for av in avs:
-        if perms.has_key(av.obj_class):
+        if av.obj_class in perms:
             s = perms[av.obj_class]
         else:
             s = refpolicy.IdSet()
@@ -321,7 +349,7 @@ class RoleTypeSet:
         return len(self.role_types.keys())
 
     def add(self, role, type):
-        if self.role_types.has_key(role):
+        if role in self.role_types:
             role_type = self.role_types[role]
         else:
             role_type = refpolicy.RoleType()
diff --git a/sepolgen-1.2.2/src/sepolgen/audit.py b/sepolgen-1.2.2/src/sepolgen/audit.py
index 56919be..dad0724 100644
--- a/sepolgen-1.2.2/src/sepolgen/audit.py
+++ b/sepolgen-1.2.2/src/sepolgen/audit.py
@@ -17,11 +17,12 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #
 
-import refpolicy
-import access
 import re
 import sys
 
+from . import refpolicy
+from . import access
+from . import util
 # Convenience functions
 
 def get_audit_boot_msgs():
@@ -42,6 +43,8 @@ def get_audit_boot_msgs():
     boottime = time.strftime("%X", s)
     output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR", "-ts", bootdate, boottime],
                               stdout=subprocess.PIPE).communicate()[0]
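+    # Popen.communicate() returns bytes on Python 3; decode to str
+    # before the audit messages are parsed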
+    if util.PY3:
+        output = util.decode_input(output)
     return output
 
 def get_audit_msgs():
@@ -55,6 +58,8 @@ def get_audit_msgs():
     import subprocess
     output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR"],
                               stdout=subprocess.PIPE).communicate()[0]
+    if util.PY3:
+        output = util.decode_input(output)
     return output
 
 def get_dmesg_msgs():
@@ -66,6 +71,8 @@ def get_dmesg_msgs():
     import subprocess
     output = subprocess.Popen(["/bin/dmesg"],
                               stdout=subprocess.PIPE).communicate()[0]
+    if util.PY3:
+        output = util.decode_input(output)
     return output
 
 # Classes representing audit messages
@@ -169,6 +176,7 @@ class AVCMessage(AuditMessage):
         self.exe = ""
         self.path = ""
         self.name = ""
+        self.ino = ""
         self.accesses = []
         self.denial = True
         self.type = audit2why.TERULE
@@ -230,6 +238,10 @@ class AVCMessage(AuditMessage):
                 self.exe = fields[1][1:-1]
             elif fields[0] == "name":
                 self.name = fields[1][1:-1]
+            elif fields[0] == "path":
+                self.path = fields[1][1:-1]
+            elif fields[0] == "ino":
+                self.ino = fields[1]
 
         if not found_src or not found_tgt or not found_class or not found_access:
             raise ValueError("AVC message in invalid format [%s]\n" % self.message)
@@ -354,7 +366,9 @@ class AuditParser:
         self.path_msgs = []
         self.by_header = { }
         self.check_input_file = False
-                
+        self.inode_dict = { }
+        self.__store_base_types()
+
     # Low-level parsing function - tries to determine if this audit
     # message is an SELinux related message and then parses it into
     # the appropriate AuditMessage subclass. This function deliberately
@@ -430,7 +444,7 @@ class AuditParser:
 
         # Group by audit header
         if msg.header != "":
-            if self.by_header.has_key(msg.header):
+            if msg.header in self.by_header:
                 self.by_header[msg.header].append(msg)
             else:
                 self.by_header[msg.header] = [msg]
@@ -492,6 +506,61 @@ class AuditParser:
         
         return role_types
 
+    def __restore_path(self, name, inode):
+        import subprocess
+        import os
+        path = ""
+        # Optimizing
+        if name == "" or inode == "":
+            return path
+        for d in self.inode_dict:
+            if d == inode and self.inode_dict[d] == name:
+                return path
+            if d == inode and self.inode_dict[d] != name:
+                return self.inode_dict[d]
+        if inode not in self.inode_dict.keys():
+            self.inode_dict[inode] = name
+
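+        # Fall back to locate(1): list files matching the basename, then
+        # compare inode numbers to recover the full path for the denial.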
+        command = "locate -b '\%s'" % name
+        try:
+            output = subprocess.check_output(command,
+                                             stderr=subprocess.STDOUT,
+                                             shell=True,
+                                             universal_newlines=True)
+            try:
+                ino = int(inode)
+            except ValueError:
+                pass
+            for file in output.split("\n"):
+                try:
+                    if int(os.lstat(file).st_ino) == ino:
+                        self.inode_dict[inode] = path = file
+                        return path
+                except:
+                    pass
+        except subprocess.CalledProcessError as e:
+            pass
+        return path
+
+    def __store_base_types(self):
+        import sepolicy
+        self.base_types = sepolicy.get_types_from_attribute("base_file_type")
+
+    def __get_base_type(self, tcontext, scontext):
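+        # Return a suggested writable type when tcontext is a base file
+        # type, 0 for a base type with no suitable writable type,
+        # and None otherwise.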
+        import sepolicy
+        # Prevent unnecessary searching
+        if (self.old_scontext == scontext and
+            self.old_tcontext == tcontext):
+            return
+        self.old_scontext = scontext
+        self.old_tcontext = tcontext
+        for btype in self.base_types:
+            if btype == tcontext:
+                for writable in sepolicy.get_writable_files(scontext):
+                    if writable.endswith(tcontext) and writable.startswith(scontext.rstrip("_t")):
+                        return writable
+                return 0
+
     def to_access(self, avc_filter=None, only_denials=True):
         """Convert the audit logs access into a an access vector set.
 
@@ -510,16 +579,23 @@ class AuditParser:
            audit logs parsed by this object.
         """
         av_set = access.AccessVectorSet()
+        self.old_scontext = ""
+        self.old_tcontext = ""
         for avc in self.avc_msgs:
             if avc.denial != True and only_denials:
                 continue
+            base_type = self.__get_base_type(avc.tcontext.type, avc.scontext.type)
+            if avc.path == "":
+                avc.path = self.__restore_path(avc.name, avc.ino)
             if avc_filter:
                 if avc_filter.filter(avc):
                     av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
-                               avc.accesses, avc, avc_type=avc.type, data=avc.data)
+                               avc.accesses, avc.path, base_type, avc,
+                               avc_type=avc.type, data=avc.data)
             else:
                 av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
-                           avc.accesses, avc, avc_type=avc.type, data=avc.data)
+                           avc.accesses, avc.path, base_type, avc,
+                           avc_type=avc.type, data=avc.data)
         return av_set
 
 class AVCTypeFilter:
diff --git a/sepolgen-1.2.2/src/sepolgen/classperms.py b/sepolgen-1.2.2/src/sepolgen/classperms.py
index c925dee..f4fd899 100644
--- a/sepolgen-1.2.2/src/sepolgen/classperms.py
+++ b/sepolgen-1.2.2/src/sepolgen/classperms.py
@@ -49,10 +49,10 @@ def t_NAME(t):
     return t
 
 def t_error(t):
-    print "Illegal character '%s'" % t.value[0]
+    print("Illegal character '%s'" % t.value[0])
     t.skip(1)
 
-import lex
+from . import lex
 lex.lex()
 
 def p_statements(p):
@@ -90,9 +90,9 @@ def p_names(p):
         p[0] = [p[1]] + p[2]
 
 def p_error(p):
-    print "Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type)
+    print("Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type))
     
-import yacc
+from . import yacc
 yacc.yacc()
 
 
@@ -112,5 +112,5 @@ test2 = """define(`all_filesystem_perms',`{ mount remount unmount getattr relabe
 define(`all_security_perms',`{ compute_av compute_create compute_member check_context load_policy compute_relabel compute_user setenforce setbool setsecparam setcheckreqprot }')
 """
 result = yacc.parse(txt)
-print result
+print(result)
     
diff --git a/sepolgen-1.2.2/src/sepolgen/defaults.py b/sepolgen-1.2.2/src/sepolgen/defaults.py
index 218bc7c..9591063 100644
--- a/sepolgen-1.2.2/src/sepolgen/defaults.py
+++ b/sepolgen-1.2.2/src/sepolgen/defaults.py
@@ -36,14 +36,14 @@ class PathChoooser(object):
             if ignore.match(line): continue
             mo = consider.match(line)
             if not mo:
-                raise ValueError, "%s:%d: line is not in key = value format" % (pathname, lineno+1)
+                raise ValueError("%s:%d: line is not in key = value format" % (pathname, lineno+1))
             self.config[mo.group(1)] = mo.group(2)
 
     # We're only exporting one useful function, so why not be a function
     def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
         paths = self.config.get(pathset, None)
         if paths is None:
-            raise ValueError, "%s was not in %s" % (pathset, self.config_pathname)
+            raise ValueError("%s was not in %s" % (pathset, self.config_pathname))
         paths = paths.split(":")
         for p in paths:
             target = os.path.join(p, testfilename)
diff --git a/sepolgen-1.2.2/src/sepolgen/interfaces.py b/sepolgen-1.2.2/src/sepolgen/interfaces.py
index 88a6dc3..0b688bf 100644
--- a/sepolgen-1.2.2/src/sepolgen/interfaces.py
+++ b/sepolgen-1.2.2/src/sepolgen/interfaces.py
@@ -21,15 +21,15 @@
 Classes for representing and manipulating interfaces.
 """
 
-import access
-import refpolicy
+import copy
 import itertools
-import objectmodel
-import matching
 
-from sepolgeni18n import _
+from . import access
+from . import refpolicy
+from . import objectmodel
+from . import matching
+from .sepolgeni18n import _
 
-import copy
 
 class Param:
     """
@@ -276,7 +276,7 @@ class InterfaceVector:
         if attributes:
             for typeattribute in interface.typeattributes():
                 for attr in typeattribute.attributes:
-                    if not attributes.attributes.has_key(attr):
+                    if attr not in attributes.attributes:
                         # print "missing attribute " + attr
                         continue
                     attr_vec = attributes.attributes[attr]
diff --git a/sepolgen-1.2.2/src/sepolgen/lex.py b/sepolgen-1.2.2/src/sepolgen/lex.py
index c149366..c13acef 100644
--- a/sepolgen-1.2.2/src/sepolgen/lex.py
+++ b/sepolgen-1.2.2/src/sepolgen/lex.py
@@ -26,18 +26,21 @@ __version__ = "2.2"
 
 import re, sys, types
 
+from . import util
+import collections
+
+
 # Regular expression used to match valid token names
 _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
 
-# Available instance types.  This is used when lexers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
+# Available instance types.  This is used when lexers are defined by a class.
+# In Python3 the InstanceType and ObjectType are no more, they've passed, ceased
+# to be, they are ex-classes along with old-style classes
 
 try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
 except AttributeError:
-   _INSTANCETYPE = types.InstanceType
-   class object: pass       # Note: needed if no new-style classes present
+   _INSTANCETYPE = object
 
 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
@@ -172,7 +175,7 @@ class Lexer:
     # readtab() - Read lexer information from a tab file
     # ------------------------------------------------------------
     def readtab(self,tabfile,fdict):
-        exec "import %s as lextab" % tabfile
+        exec("import %s as lextab" % tabfile)
         self.lextokens      = lextab._lextokens
         self.lexreflags     = lextab._lexreflags
         self.lexliterals    = lextab._lexliterals
@@ -197,8 +200,8 @@ class Lexer:
     # input() - Push a new string into the lexer
     # ------------------------------------------------------------
     def input(self,s):
-        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
-            raise ValueError, "Expected a string"
+        if not (isinstance(s,util.bytes_type) or isinstance(s, util.string_type)):
+            raise ValueError("Expected a string")
         self.lexdata = s
         self.lexpos = 0
         self.lexlen = len(s)
@@ -207,8 +210,8 @@ class Lexer:
     # begin() - Changes the lexing state
     # ------------------------------------------------------------
     def begin(self,state):
-        if not self.lexstatere.has_key(state):
-            raise ValueError, "Undefined state"
+        if state not in self.lexstatere:
+            raise ValueError("Undefined state")
         self.lexre = self.lexstatere[state]
         self.lexretext = self.lexstateretext[state]
         self.lexignore = self.lexstateignore.get(state,"")
@@ -286,7 +289,7 @@ class Lexer:
                    break
 
                 # if func not callable, it means it's an ignored token                
-                if not callable(func):
+                if not isinstance(func, collections.Callable):
                    break 
 
                 # If token is processed by a function, call it
@@ -299,9 +302,9 @@ class Lexer:
                 
                 # Verify type of the token.  If not in the token map, raise an error
                 if not self.lexoptimize:
-                    if not self.lextokens.has_key(newtok.type):
-                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            func.func_code.co_filename, func.func_code.co_firstlineno,
+                    if newtok.type not in self.lextokens:
+                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+                            func.__code__.co_filename, func.__code__.co_firstlineno,
                             func.__name__, newtok.type),lexdata[lexpos:])
 
                 return newtok
@@ -329,17 +332,17 @@ class Lexer:
                     newtok = self.lexerrorf(tok)
                     if lexpos == self.lexpos:
                         # Error method didn't change text position at all. This is an error.
-                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                     lexpos = self.lexpos
                     if not newtok: continue
                     return newtok
 
                 self.lexpos = lexpos
-                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
 
         self.lexpos = lexpos + 1
         if self.lexdata is None:
-             raise RuntimeError, "No input string given with input()"
+             raise RuntimeError("No input string given with input()")
         return None
         
 # -----------------------------------------------------------------------------
@@ -377,7 +380,7 @@ def _validate_file(filename):
             if not prev:
                 counthash[name] = linen
             else:
-                print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+                print("%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev))
                 noerror = 0
         linen += 1
     return noerror
@@ -439,12 +442,12 @@ def _form_master_re(relist,reflags,ldict):
                 # callback function to carry out the action
                 if f.find("ignore_") > 0:
                     lexindexfunc[i] = (None,None)
-                    print "IGNORE", f
+                    print("IGNORE", f)
                 else:
                     lexindexfunc[i] = (None, f[2:])
          
         return [(lexre,lexindexfunc)],[regex]
-    except Exception,e:
+    except Exception as e:
         m = int(len(relist)/2)
         if m == 0: m = 1
         llist, lre = _form_master_re(relist[:m],reflags,ldict)
@@ -464,7 +467,7 @@ def _statetoken(s,names):
     nonstate = 1
     parts = s.split("_")
     for i in range(1,len(parts)):
-         if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+         if parts[i] not in names and parts[i] != 'ANY': break
     if i > 1:
        states = tuple(parts[1:i])
     else:
@@ -507,7 +510,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
             for (i,v) in _items:
                 ldict[i] = v
         else:
-            raise ValueError,"Expected a module or instance"
+            raise ValueError("Expected a module or instance")
         lexobj.lexmodule = module
         
     else:
@@ -542,61 +545,64 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         literals = ldict.get("literals","")
         
     if not tokens:
-        raise SyntaxError,"lex: module does not define 'tokens'"
-    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-        raise SyntaxError,"lex: tokens must be a list or tuple."
+        raise SyntaxError("lex: module does not define 'tokens'")
+    if not (isinstance(tokens,list) or isinstance(tokens,tuple)):
+        raise SyntaxError("lex: tokens must be a list or tuple.")
 
     # Build a dictionary of valid token names
     lexobj.lextokens = { }
     if not optimize:
         for n in tokens:
             if not _is_identifier.match(n):
-                print "lex: Bad token name '%s'" % n
+                print("lex: Bad token name '%s'" % n)
                 error = 1
-            if warn and lexobj.lextokens.has_key(n):
-                print "lex: Warning. Token '%s' multiply defined." % n
+            if warn and n in lexobj.lextokens:
+                print("lex: Warning. Token '%s' multiply defined." % n)
             lexobj.lextokens[n] = None
     else:
         for n in tokens: lexobj.lextokens[n] = None
 
     if debug:
-        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+        print("lex: tokens = '%s'" % list(lexobj.lextokens.keys()))
 
     try:
          for c in literals:
-               if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
-                    print "lex: Invalid literal %s. Must be a single character" % repr(c)
+               if not (isinstance(c,util.bytes_type) or isinstance(c, util.string_type)) or len(c) > 1:
+                    print("lex: Invalid literal %s. Must be a single character" % repr(c))
                     error = 1
                     continue
 
     except TypeError:
-         print "lex: Invalid literals specification. literals must be a sequence of characters."
+         print("lex: Invalid literals specification. literals must be a sequence of characters.")
          error = 1
 
     lexobj.lexliterals = literals
 
     # Build statemap
     if states:
-         if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
-              print "lex: states must be defined as a tuple or list."
+         if not (isinstance(states,tuple) or isinstance(states,list)):
+              print("lex: states must be defined as a tuple or list.")
               error = 1
          else:
               for s in states:
-                    if not isinstance(s,types.TupleType) or len(s) != 2:
-                           print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+                    if not isinstance(s,tuple) or len(s) != 2:
+                           print("lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s))
                            error = 1
                            continue
                     name, statetype = s
-                    if not isinstance(name,types.StringType):
-                           print "lex: state name %s must be a string" % repr(name)
+                    original_name = name
+                    if isinstance(name, util.string_type):
+                           name = util.encode_input(name)
+                    if not isinstance(name,util.bytes_type) or len(original_name) != len(name):
+                           print("lex: state name %s must be a byte string" % repr(original_name))
                            error = 1
                            continue
                     if not (statetype == 'inclusive' or statetype == 'exclusive'):
-                           print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+                           print("lex: state type for state %s must be 'inclusive' or 'exclusive'" % name)
                            error = 1
                            continue
-                    if stateinfo.has_key(name):
-                           print "lex: state '%s' already defined." % name
+                    if name in stateinfo:
+                           print("lex: state '%s' already defined." % name)
                            error = 1
                            continue
                     stateinfo[name] = statetype
@@ -618,28 +624,28 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     errorf   = { }        # Error functions by state
 
     if len(tsymbols) == 0:
-        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+        raise SyntaxError("lex: no rules of the form t_rulename are defined.")
 
     for f in tsymbols:
         t = ldict[f]
         states, tokname = _statetoken(f,stateinfo)
         toknames[f] = tokname
 
-        if callable(t):
+        if isinstance(t, collections.Callable):
             for s in states: funcsym[s].append((f,t))
-        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+        elif (isinstance(t, util.bytes_type) or isinstance(t,util.string_type)):
             for s in states: strsym[s].append((f,t))
         else:
-            print "lex: %s not defined as a function or string" % f
+            print("lex: %s not defined as a function or string" % f)
             error = 1
 
     # Sort the functions by line number
     for f in funcsym.values():
-        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+        f.sort(key=lambda x: x[1].__code__.co_firstlineno)
 
     # Sort the strings by regular expression length
     for s in strsym.values():
-        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+        s.sort(key=lambda x: len(x[1]), reverse=True)
 
     regexs = { }
 
@@ -649,31 +655,31 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
 
         # Add rules defined by functions first
         for fname, f in funcsym[state]:
-            line = f.func_code.co_firstlineno
-            file = f.func_code.co_filename
+            line = f.__code__.co_firstlineno
+            file = f.__code__.co_filename
             files[file] = None
             tokname = toknames[fname]
 
             ismethod = isinstance(f, types.MethodType)
 
             if not optimize:
-                nargs = f.func_code.co_argcount
+                nargs = f.__code__.co_argcount
                 if ismethod:
                     reqargs = 2
                 else:
                     reqargs = 1
                 if nargs > reqargs:
-                    print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__))
                     error = 1
                     continue
 
                 if nargs < reqargs:
-                    print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__))
                     error = 1
                     continue
 
                 if tokname == 'ignore':
-                    print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__))
                     error = 1
                     continue
         
@@ -686,25 +692,25 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
                     try:
                         c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
                         if c.match(""):
-                             print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+                             print("%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__))
                              error = 1
                              continue
-                    except re.error,e:
-                        print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+                    except re.error as e:
+                        print("%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e))
                         if '#' in f.__doc__:
-                             print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)                 
+                             print("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__))
                         error = 1
                         continue
 
                     if debug:
-                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+                        print("lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state))
 
                 # Okay. The regular expression seemed okay.  Let's append it to the master regular
                 # expression we're building
   
                 regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
             else:
-                print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+                print("%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__))
 
         # Now add all of the simple rules
         for name,r in strsym[state]:
@@ -716,34 +722,34 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
 
             if not optimize:
                 if tokname == 'error':
-                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+                    raise SyntaxError("lex: Rule '%s' must be defined as a function" % name)
                     error = 1
                     continue
         
-                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
-                    print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+                if tokname not in lexobj.lextokens and tokname.find("ignore_") < 0:
+                    print("lex: Rule '%s' defined for an unspecified token %s." % (name,tokname))
                     error = 1
                     continue
                 try:
                     c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
                     if (c.match("")):
-                         print "lex: Regular expression for rule '%s' matches empty string." % name
+                         print("lex: Regular expression for rule '%s' matches empty string." % name)
                          error = 1
                          continue
-                except re.error,e:
-                    print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+                except re.error as e:
+                    print("lex: Invalid regular expression for rule '%s'. %s" % (name,e))
                     if '#' in r:
-                         print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+                         print("lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name)
 
                     error = 1
                     continue
                 if debug:
-                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+                    print("lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state))
                 
             regex_list.append("(?P<%s>%s)" % (name,r))
 
         if not regex_list:
-             print "lex: No rules defined for state '%s'" % state
+             print("lex: No rules defined for state '%s'" % state)
              error = 1
 
         regexs[state] = regex_list
@@ -755,7 +761,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
                 error = 1
 
     if error:
-        raise SyntaxError,"lex: Unable to build lexer."
+        raise SyntaxError("lex: Unable to build lexer.")
 
     # From this point forward, we're reasonably confident that we can build the lexer.
     # No more errors will be generated, but there might be some warning messages.
@@ -768,7 +774,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         lexobj.lexstateretext[state] = re_text
         if debug:
             for i in range(len(re_text)):
-                 print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+                 print("lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i]))
 
     # For inclusive states, we need to add the INITIAL state
     for state,type in stateinfo.items():
@@ -788,19 +794,19 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     lexobj.lexstateerrorf = errorf
     lexobj.lexerrorf = errorf.get("INITIAL",None)
     if warn and not lexobj.lexerrorf:
-        print "lex: Warning. no t_error rule is defined."
+        print("lex: Warning. no t_error rule is defined.")
 
     # Check state information for ignore and error rules
     for s,stype in stateinfo.items():
         if stype == 'exclusive':
-              if warn and not errorf.has_key(s):
-                   print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
-              if warn and not ignore.has_key(s) and lexobj.lexignore:
-                   print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+              if warn and s not in errorf:
+                   print("lex: Warning. no error rule is defined for exclusive state '%s'" % s)
+              if warn and s not in ignore and lexobj.lexignore:
+                   print("lex: Warning. no ignore rule is defined for exclusive state '%s'" % s)
         elif stype == 'inclusive':
-              if not errorf.has_key(s):
+              if s not in errorf:
                    errorf[s] = errorf.get("INITIAL",None)
-              if not ignore.has_key(s):
+              if s not in ignore:
                    ignore[s] = ignore.get("INITIAL","")
    
 
@@ -829,7 +835,7 @@ def runmain(lexer=None,data=None):
             data = f.read()
             f.close()
         except IndexError:
-            print "Reading from standard input (type EOF to end):"
+            print("Reading from standard input (type EOF to end):")
             data = sys.stdin.read()
 
     if lexer:
@@ -845,7 +851,7 @@ def runmain(lexer=None,data=None):
     while 1:
         tok = _token()
         if not tok: break
-        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+        print("(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos))
         
 
 # -----------------------------------------------------------------------------
diff --git a/sepolgen-1.2.2/src/sepolgen/matching.py b/sepolgen-1.2.2/src/sepolgen/matching.py
index d56dd92..6f86359 100644
--- a/sepolgen-1.2.2/src/sepolgen/matching.py
+++ b/sepolgen-1.2.2/src/sepolgen/matching.py
@@ -21,33 +21,30 @@
 Classes and algorithms for matching requested access to access vectors.
 """
 
-import access
-import objectmodel
 import itertools
 
-class Match:
+from . import access
+from . import objectmodel
+from . import util
+
+
+class Match(util.Comparison):
     def __init__(self, interface=None, dist=0):
         self.interface = interface
         self.dist = dist
         self.info_dir_change = False
-
-    def __cmp__(self, other):
-        if self.dist == other.dist:
-            if self.info_dir_change:
-                if other.info_dir_change:
-                    return 0
-                else:
-                    return 1
-            else:
-                if other.info_dir_change:
-                    return -1
-                else:
-                    return 0
-        else:
-            if self.dist < other.dist:
-                return -1
-            else:
-                return 1
+        # when implementing __eq__, __hash__ is also needed on py2;
+        # if the object is mutable, __hash__ should be None
+        self.__hash__ = None
+
+    def _compare(self, other, method):
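+        # called by util.Comparison; orders matches by distance first,
+        # then by whether the information-flow direction changed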
+        try:
+            a = (self.dist, self.info_dir_change)
+            b = (other.dist, other.info_dir_change)
+            return method(a, b)
+        except (AttributeError, TypeError):
+            # trying to compare to foreign type
+            return NotImplemented
 
 class MatchList:
     DEFAULT_THRESHOLD = 150
diff --git a/sepolgen-1.2.2/src/sepolgen/module.py b/sepolgen-1.2.2/src/sepolgen/module.py
index 7fc9443..c09676a 100644
--- a/sepolgen-1.2.2/src/sepolgen/module.py
+++ b/sepolgen-1.2.2/src/sepolgen/module.py
@@ -22,18 +22,21 @@ Utilities for dealing with the compilation of modules and creation
 of module tress.
 """
 
-import defaults
-
-import selinux
-
 import re
 import tempfile
-import commands
+try:
+    from subprocess import getstatusoutput
+except ImportError:
+    from commands import getstatusoutput
 import os
 import os.path
-import subprocess
 import shutil
 
+import selinux
+
+from . import defaults
+
+
 def is_valid_name(modname):
     """Check that a module name is valid.
     """
@@ -130,7 +133,7 @@ class ModuleCompiler:
 
     def run(self, command):
         self.o(command)
-        rc, output = commands.getstatusoutput(command)
+        rc, output = getstatusoutput(command)
         self.o(output)
         
         return rc
diff --git a/sepolgen-1.2.2/src/sepolgen/objectmodel.py b/sepolgen-1.2.2/src/sepolgen/objectmodel.py
index 88c8a1f..d05d721 100644
--- a/sepolgen-1.2.2/src/sepolgen/objectmodel.py
+++ b/sepolgen-1.2.2/src/sepolgen/objectmodel.py
@@ -118,7 +118,7 @@ class PermMappings:
                 continue
             if fields[0] == "class":
                 c = fields[1]
-                if self.classes.has_key(c):
+                if c in self.classes:
                     raise ValueError("duplicate class in perm map")
                 self.classes[c] = { }
                 cur = self.classes[c]
diff --git a/sepolgen-1.2.2/src/sepolgen/output.py b/sepolgen-1.2.2/src/sepolgen/output.py
index 739452d..7a83aee 100644
--- a/sepolgen-1.2.2/src/sepolgen/output.py
+++ b/sepolgen-1.2.2/src/sepolgen/output.py
@@ -27,8 +27,12 @@ generating policy. This keeps the semantic / syntactic issues
 cleanly separated from the formatting issues.
 """
 
-import refpolicy
-import util
+from . import refpolicy
+from . import util
+
+if util.PY3:
+    from .util import cmp
+
 
 class ModuleWriter:
     def __init__(self):
@@ -127,7 +131,7 @@ def sort_filter(module):
         rules = []
         rules.extend(node.avrules())
         rules.extend(node.interface_calls())
-        rules.sort(rule_cmp)
+        rules.sort(key=util.cmp_to_key(rule_cmp))
 
         cur = None
         sep_rules = []
@@ -151,7 +155,7 @@ def sort_filter(module):
 
         ras = []
         ras.extend(node.role_types())
-        ras.sort(role_type_cmp)
+        ras.sort(key=util.cmp_to_key(role_type_cmp))
         if len(ras):
             comment = refpolicy.Comment()
             comment.lines.append("============= ROLES ==============")
diff --git a/sepolgen-1.2.2/src/sepolgen/policygen.py b/sepolgen-1.2.2/src/sepolgen/policygen.py
index 5f38577..f374132 100644
--- a/sepolgen-1.2.2/src/sepolgen/policygen.py
+++ b/sepolgen-1.2.2/src/sepolgen/policygen.py
@@ -24,17 +24,18 @@ classes and algorithms for the generation of SELinux policy.
 import itertools
 import textwrap
 
-import refpolicy
-import objectmodel
-import access
-import interfaces
-import matching
 import selinux.audit2why as audit2why
 try:
     from setools import *
 except:
     pass
 
+from . import refpolicy
+from . import objectmodel
+from . import access
+from . import interfaces
+from . import matching
+from . import util
 # Constants for the level of explanation from the generation
 # routines
 NO_EXPLANATION    = 0
@@ -81,8 +82,9 @@ class PolicyGenerator:
             self.module = refpolicy.Module()
 
         self.dontaudit = False
-
+        self.mislabeled = None
         self.domains = None
+
     def set_gen_refpol(self, if_set=None, perm_maps=None):
         """Set whether reference policy interfaces are generated.
 
@@ -152,6 +154,18 @@ class PolicyGenerator:
         """Return the generated module"""
         return self.module
 
+    def __restore_label(self, av):
+        import selinux
+        try:
+            context = selinux.matchpathcon(av.obj_path, 0)
+            split = context[1].split(":")[2]
+            if split != av.tgt_type:
+                self.mislabeled = split
+                return
+        except OSError:
+            pass
+        self.mislabeled = None
+
     def __add_allow_rules(self, avs):
         for av in avs:
             rule = refpolicy.AVRule(av)
@@ -160,6 +174,34 @@ class PolicyGenerator:
             rule.comment = ""
             if self.explain:
                 rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=self.explain)))
+            # base_type[0] == 0 means a base type exists but no writable path was found
+            # base_type[0] == None means no base file type applies
+            # base_type[1] contains the target type
+            # base_type[2] contains the source type
+            base_type = av.base_file_type()
+            if base_type[0] == 0 and av.type != audit2why.ALLOW:
+                rule.comment += "\n#!!!! WARNING: '%s' is a base type." % "".join(base_type[1])
+            # default to False so 'permission' is defined even when av.perms is empty
+            permission = False
+            for perm in av.perms:
+                if perm == "write" or perm == "create":
+                    permission = True
+                    break
+
+            # Catch perms 'write' and 'create' for base types
+            if (base_type[0] is not None and base_type[0] != 0
+                and permission and av.type != audit2why.ALLOW):
+                if av.obj_class == "dir":
+                    comp = "(/.*?)"
+                else:
+                    comp = ""
+                rule.comment += "\n#!!!! WARNING '%s' is not allowed to write or create to %s.  Change the label to %s." % ("".join(base_type[2]), "".join(base_type[1]), "".join(base_type[0]))
+                if av.obj_path != "":
+                    rule.comment += "\n#!!!! $ semanage fcontext -a -t %s %s%s   \n#!!!! $ restorecon -R -v %s" % ("".join(base_type[0]), "".join(av.obj_path), "".join(comp) ,"".join(av.obj_path))
+
+            self.__restore_label(av)
+            if self.mislabeled is not None and av.type != audit2why.ALLOW:
+                rule.comment += "\n#!!!! The file '%s' is mislabeled on your system.  \n#!!!! Fix with $ restorecon -R -v %s" % ("".join(av.obj_path), "".join(av.obj_path))
             if av.type == audit2why.ALLOW:
                 rule.comment += "\n#!!!! This avc is allowed in the current policy"
             if av.type == audit2why.DONTAUDIT:
@@ -167,14 +209,14 @@ class PolicyGenerator:
 
             if av.type == audit2why.BOOLEAN:
                 if len(av.data) > 1:
-                    rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n#     %s" % ", ".join(map(lambda x: x[0], av.data))
+                    rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n#     %s" % ", ".join([x[0] for x in av.data])
                 else:
                     rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]
 
             if av.type == audit2why.CONSTRAINT:
                 rule.comment += "\n#!!!! This avc is a constraint violation.  You would need to modify the attributes of either the source or target types to allow this access."
                 rule.comment += "\n#Constraint rule: "
-                rule.comment += "\n\t" + av.data[0]
+                rule.comment += "\n#\t" + av.data[0]
                 for reason in av.data[1:]:
                     rule.comment += "\n#\tPossible cause is the source %s and target %s are different." % reason
 
@@ -186,7 +228,7 @@ class PolicyGenerator:
                         self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
                     types=[]
 
-                    for i in map(lambda x: x[TCONTEXT], sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})):
+                    for i in [x[TCONTEXT] for x in sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})]:
                         if i not in self.domains:
                             types.append(i)
                     if len(types) == 1:
@@ -275,15 +317,12 @@ def explain_access(av, ml=None, verbosity=SHORT_EXPLANATION):
         explain_interfaces()
     return s
 
-def param_comp(a, b):
-    return cmp(b.num, a.num)
-
 def call_interface(interface, av):
     params = []
     args = []
 
     params.extend(interface.params.values())
-    params.sort(param_comp)
+    params.sort(key=lambda param: param.num, reverse=True)
 
     ifcall = refpolicy.InterfaceCall()
     ifcall.ifname = interface.name
@@ -296,7 +335,7 @@ def call_interface(interface, av):
         elif params[i].type == refpolicy.OBJ_CLASS:
             ifcall.args.append(av.obj_class)
         else:
-            print params[i].type
+            print(params[i].type)
             assert(0)
 
     assert(len(ifcall.args) > 0)
@@ -318,7 +357,7 @@ class InterfaceGenerator:
         for x in ifs.interfaces.values():
             params = []
             params.extend(x.params.values())
-            params.sort(param_comp)
+            params.sort(key=lambda param: param.num, reverse=True)
             for i in range(len(params)):
                 # Check that the paramater position matches
                 # the number (e.g., $1 is the first arg). This
diff --git a/sepolgen-1.2.2/src/sepolgen/refparser.py b/sepolgen-1.2.2/src/sepolgen/refparser.py
index b453a29..f5ff19c 100644
--- a/sepolgen-1.2.2/src/sepolgen/refparser.py
+++ b/sepolgen-1.2.2/src/sepolgen/refparser.py
@@ -34,12 +34,11 @@ import os
 import re
 import traceback
 
-import refpolicy
-import access
-import defaults
-
-import lex
-import yacc
+from . import access
+from . import defaults
+from . import lex
+from . import refpolicy
+from . import yacc
 
 # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
 #
@@ -267,7 +266,7 @@ def t_comment(t):
     t.lexer.lineno += 1
 
 def t_error(t):
-    print "Illegal character '%s'" % t.value[0]
+    print("Illegal character '%s'" % t.value[0])
     t.skip(1)
 
 def t_newline(t):
@@ -960,7 +959,7 @@ def p_optional_semi(p):
 def p_error(tok):
     global error, parse_file, success, parser
     error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
-    print error
+    print(error)
     success = False
 
 def prep_spt(spt):
@@ -997,7 +996,7 @@ def parse(text, module=None, support=None, debug=False):
 
     try:
         parser.parse(text, debug=debug, lexer=lexer)
-    except Exception, e:
+    except Exception as e:
         parser = None
         lexer = None
         error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()
@@ -1030,7 +1029,7 @@ def list_headers(root):
 
 
 def parse_headers(root, output=None, expand=True, debug=False):
-    import util
+    from . import util
 
     headers = refpolicy.Headers()
 
@@ -1064,9 +1063,9 @@ def parse_headers(root, output=None, expand=True, debug=False):
             fd.close()
             parse_file = f
             parse(txt, module, spt, debug)
-        except IOError, e:
+        except IOError as e:
             return
-        except ValueError, e:
+        except ValueError as e:
             raise ValueError("error parsing file %s: %s" % (f, str(e)))
 
     spt = None
@@ -1102,7 +1101,7 @@ def parse_headers(root, output=None, expand=True, debug=False):
                 parse_file(x[1], m, spt)
             else:
                 parse_file(x[1], m)
-        except ValueError, e:
+        except ValueError as e:
             o(str(e) + "\n")
             failures.append(x[1])
             continue
diff --git a/sepolgen-1.2.2/src/sepolgen/refpolicy.py b/sepolgen-1.2.2/src/sepolgen/refpolicy.py
index 8ad64a9..a9bb92d 100644
--- a/sepolgen-1.2.2/src/sepolgen/refpolicy.py
+++ b/sepolgen-1.2.2/src/sepolgen/refpolicy.py
@@ -18,7 +18,6 @@
 #
 
 import string
-import itertools
 import selinux
 
 # OVERVIEW
@@ -85,53 +84,53 @@ class Node(PolicyBase):
     # Top level nodes
 
     def nodes(self):
-        return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))
+        return filter(lambda x: isinstance(x, Node), walktree(self))
 
     def modules(self):
-        return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))
+        return filter(lambda x: isinstance(x, Module), walktree(self))
 
     def interfaces(self):
-        return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))
+        return filter(lambda x: isinstance(x, Interface), walktree(self))
 
     def templates(self):
-        return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))
+        return filter(lambda x: isinstance(x, Template), walktree(self))
 
     def support_macros(self):
-        return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))
+        return filter(lambda x: isinstance(x, SupportMacros), walktree(self))
 
     # Common policy statements
 
     def module_declarations(self):
-        return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
+        return filter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
 
     def interface_calls(self):
-        return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))
+        return filter(lambda x: isinstance(x, InterfaceCall), walktree(self))
 
     def avrules(self):
-        return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))
+        return filter(lambda x: isinstance(x, AVRule), walktree(self))
 
     def typerules(self):
-        return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))
+        return filter(lambda x: isinstance(x, TypeRule), walktree(self))
 
     def typeattributes(self):
         """Iterate over all of the TypeAttribute children of this Interface."""
-        return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))
+        return filter(lambda x: isinstance(x, TypeAttribute), walktree(self))
 
     def roleattributes(self):
         """Iterate over all of the RoleAttribute children of this Interface."""
-        return itertools.ifilter(lambda x: isinstance(x, RoleAttribute), walktree(self))
+        return filter(lambda x: isinstance(x, RoleAttribute), walktree(self))
 
     def requires(self):
-        return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))
+        return filter(lambda x: isinstance(x, Require), walktree(self))
 
     def roles(self):
-        return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))
+        return filter(lambda x: isinstance(x, Role), walktree(self))
 
     def role_allows(self):
-        return itertools.ifilter(lambda x: isinstance(x, RoleAllow), walktree(self))
+        return filter(lambda x: isinstance(x, RoleAllow), walktree(self))
 
     def role_types(self):
-        return itertools.ifilter(lambda x: isinstance(x, RoleType), walktree(self))
+        return filter(lambda x: isinstance(x, RoleType), walktree(self))
 
     def __str__(self):
         if self.comment:
@@ -291,7 +290,7 @@ class SecurityContext(Leaf):
         self.type = fields[2]
         if len(fields) > 3:
             # FUTURE - normalize level fields to allow more comparisons to succeed.
-            self.level = string.join(fields[3:], ':')
+            self.level = ':'.join(fields[3:])
         else:
             self.level = None
 
@@ -694,7 +693,7 @@ def print_tree(head):
         s = ""
         for i in range(depth):
             s = s + "\t"
-        print s + str(node)
+        print(s + str(node))
 
 
 class Headers(Node):
@@ -801,7 +800,7 @@ class SupportMacros(Node):
         # are ordered correctly so that no macro is used before
         # it is defined
         s = set()
-        if self.map.has_key(perm):
+        if perm in self.map:
             for p in self.by_name(perm):
                 s.update(self.__expand_perm(p))
         else:
@@ -824,7 +823,7 @@ class SupportMacros(Node):
     def has_key(self, name):
         if not self.map:
             self.__gen_map()
-        return self.map.has_key(name)
+        return name in self.map
 
 class Require(Leaf):
     def __init__(self, parent=None):
diff --git a/sepolgen-1.2.2/src/sepolgen/util.py b/sepolgen-1.2.2/src/sepolgen/util.py
index 74a11f5..1fca971 100644
--- a/sepolgen-1.2.2/src/sepolgen/util.py
+++ b/sepolgen-1.2.2/src/sepolgen/util.py
@@ -16,6 +16,19 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #
+import locale
+import sys
+
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    bytes_type=bytes
+    string_type=str
+else:
+    bytes_type=str
+    string_type=unicode
+
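+# bytes_type and string_type name the byte and text string types for the
+# running Python version; lex.py uses them in isinstance() checks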
 
 class ConsoleProgressBar:
     def __init__(self, out, steps=100, indicator='#'):
@@ -76,6 +89,88 @@ def first(s, sorted=False):
         for x in s:
             return x
 
+def encode_input(text):
+    """Encode the given text using the preferred system encoding."""
+    # locale (imported at module level) usually reports the correct
+    # encoding for the current environment
+    encoding = locale.getpreferredencoding()
+    try:
+        encoded_text = text.encode(encoding)
+    except UnicodeError:
+        # the preferred encoding may be something like ASCII that cannot
+        # represent all of the text, which raises UnicodeError;
+        # fall back to UTF-8 as a best guess
+        encoded_text = text.encode('utf-8')
+    return encoded_text
+
+def decode_input(text):
+    """Decode the given text using the preferred system encoding."""
+    # locale (imported at module level) usually reports the correct
+    # encoding for the current environment
+    encoding = locale.getpreferredencoding()
+    try:
+        decoded_text = text.decode(encoding)
+    except UnicodeError:
+        # the preferred encoding may be something like ASCII that cannot
+        # decode all of the bytes, which raises UnicodeError;
+        # fall back to UTF-8 as a best guess
+        decoded_text = text.decode('utf-8')
+    return decoded_text
+
+class Comparison():
+    """Class used when implementing rich comparison.
+
+    Inherit from this class if you want to have a rich
+    comparison within the class; afterwards, implement
+    the _compare method in your class."""
+
+    def _compare(self, other, method):
+        raise NotImplementedError
+
+    def __eq__(self, other):
+        return self._compare(other, lambda a, b: a == b)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda a, b: a < b)
+
+    def __le__(self, other):
+        return self._compare(other, lambda a, b: a <= b)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda a, b: a >= b)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda a, b: a > b)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda a, b: a != b)
+
+if sys.version_info < (2,7):
+    # cmp_to_key function is missing in python2.6
+    def cmp_to_key(mycmp):
+        'Convert a cmp= function into a key= function'
+        class K:
+            def __init__(self, obj, *args):
+                self.obj = obj
+            def __lt__(self, other):
+                return mycmp(self.obj, other.obj) < 0
+            def __gt__(self, other):
+                return mycmp(self.obj, other.obj) > 0
+            def __eq__(self, other):
+                return mycmp(self.obj, other.obj) == 0
+            def __le__(self, other):
+                return mycmp(self.obj, other.obj) <= 0
+            def __ge__(self, other):
+                return mycmp(self.obj, other.obj) >= 0
+            def __ne__(self, other):
+                return mycmp(self.obj, other.obj) != 0
+        return K
+else:
+    from functools import cmp_to_key
+
+def cmp(first, second):
+    # Replacement for the builtin cmp(), which was removed in Python 3.
+    return (first > second) - (second > first)
+
 if __name__ == "__main__":
     import sys
     import time
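Taken together, the helpers above replace the Python 2 __cmp__ protocol: Comparison funnels all six rich-comparison operators through a single _compare() hook, cmp_to_key() adapts any remaining comparator functions for key=-based sorting, and cmp() re-creates the removed builtin. A condensed sketch of how a subclass plugs in (Version is a hypothetical example type, not part of sepolgen):

    class Comparison(object):
        """Mixin: route all rich comparisons through _compare()."""
        def _compare(self, other, method):
            raise NotImplementedError
        def __eq__(self, other): return self._compare(other, lambda a, b: a == b)
        def __ne__(self, other): return self._compare(other, lambda a, b: a != b)
        def __lt__(self, other): return self._compare(other, lambda a, b: a < b)
        def __le__(self, other): return self._compare(other, lambda a, b: a <= b)
        def __gt__(self, other): return self._compare(other, lambda a, b: a > b)
        def __ge__(self, other): return self._compare(other, lambda a, b: a >= b)

    class Version(Comparison):
        def __init__(self, major, minor):
            self.major, self.minor = major, minor
        def _compare(self, other, method):
            try:
                return method((self.major, self.minor),
                              (other.major, other.minor))
            except AttributeError:
                # comparing against a foreign type
                return NotImplemented

    assert Version(1, 2) < Version(1, 3)
    assert Version(2, 0) == Version(2, 0)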
diff --git a/sepolgen-1.2.2/src/sepolgen/yacc.py b/sepolgen-1.2.2/src/sepolgen/yacc.py
index bc4536d..f006354 100644
--- a/sepolgen-1.2.2/src/sepolgen/yacc.py
+++ b/sepolgen-1.2.2/src/sepolgen/yacc.py
@@ -67,7 +67,13 @@ default_lr  = 'LALR'           # Default LR table generation method
 
 error_count = 3                # Number of symbols that must be shifted to leave recovery mode
 
-import re, types, sys, cStringIO, hashlib, os.path
+import re, types, sys, hashlib, os.path
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+from . import util
 
 # Exception raised for yacc-related errors
 class YaccError(Exception):   pass
@@ -109,7 +115,7 @@ class YaccProduction:
         self.stack = stack
 
     def __getitem__(self,n):
-        if type(n) == types.IntType:
+        if type(n) == int:
              if n >= 0: return self.slice[n].value
              else: return self.stack[n].value
         else:
@@ -139,9 +145,9 @@ class YaccProduction:
 
     def pushback(self,n):
         if n <= 0:
-            raise ValueError, "Expected a positive value"
+            raise ValueError("Expected a positive value")
         if n > (len(self.slice)-1):
-            raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
+            raise ValueError("Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1))
         for i in range(0,n):
             self.pbstack.append(self.slice[-i-1])
 
@@ -157,7 +163,7 @@ class Parser:
         # object directly.
 
         if magic != "xyzzy":
-            raise YaccError, "Can't instantiate Parser. Use yacc() instead."
+            raise YaccError("Can't instantiate Parser. Use yacc() instead.")
 
         # Reset internal state
         self.productions = None          # List of productions
@@ -190,7 +196,7 @@ class Parser:
 
         # If no lexer was given, we will try to use the lex module
         if not lexer:
-            import lex
+            from . import lex
             lexer = lex.lexer
 
         pslice.lexer = lexer
@@ -221,7 +227,7 @@ class Parser:
             # is already set, we just use that. Otherwise, we'll pull
             # the next token off of the lookaheadstack or from the lexer
             if debug > 1:
-                print 'state', statestack[-1]
+                print('state', statestack[-1])
             if not lookahead:
                 if not lookaheadstack:
                     lookahead = get_token()     # Get the next token
@@ -239,7 +245,7 @@ class Parser:
             t = actions.get((s,ltype),None)
 
             if debug > 1:
-                print 'action', t
+                print('action', t)
             if t is not None:
                 if t > 0:
                     # shift a symbol on the stack
@@ -396,7 +402,7 @@ class Parser:
                 continue
 
             # Call an error function here
-            raise RuntimeError, "yacc: internal parser error!!!\n"
+            raise RuntimeError("yacc: internal parser error!!!\n")
 
 # -----------------------------------------------------------------------------
 #                          === Parser Construction ===
@@ -457,12 +463,12 @@ def validate_dict(d):
 
         if n[0:2] == 'p_':
             sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
-        if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
+        if 1 and isinstance(v,types.FunctionType) and v.__code__.co_argcount == 1:
             try:
                 doc = v.__doc__.split(" ")
                 if doc[1] == ':':
-                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
-            except StandardError:
+                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.__code__.co_filename, v.__code__.co_firstlineno,n))
+            except Exception:
                 pass
 
 # -----------------------------------------------------------------------------
@@ -514,8 +520,8 @@ def initialize_vars():
 
     # File objects used when creating the parser.out debugging file
     global _vf, _vfc
-    _vf           = cStringIO.StringIO()
-    _vfc          = cStringIO.StringIO()
+    _vf           = StringIO()
+    _vfc          = StringIO()
 
 # -----------------------------------------------------------------------------
 # class Production:
@@ -581,7 +587,7 @@ class Production:
         # Precompute list of productions immediately following
         try:
             p.lrafter = Prodnames[p.prod[n+1]]
-        except (IndexError,KeyError),e:
+        except (IndexError,KeyError) as e:
             p.lrafter = []
         try:
             p.lrbefore = p.prod[n-1]
@@ -615,7 +621,7 @@ _is_identifier = re.compile(r'^[a-zA-Z0-9_-~]+$')
 
 def add_production(f,file,line,prodname,syms):
     
-    if Terminals.has_key(prodname):
+    if prodname in Terminals:
         sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
         return -1
     if prodname == 'error':
@@ -634,7 +640,7 @@ def add_production(f,file,line,prodname,syms):
                  if (len(c) > 1):
                       sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname)) 
                       return -1
-                 if not Terminals.has_key(c):
+                 if c not in Terminals:
                       Terminals[c] = []
                  syms[x] = c
                  continue
@@ -646,7 +652,7 @@ def add_production(f,file,line,prodname,syms):
 
     # See if the rule is already in the rulemap
     map = "%s -> %s" % (prodname,syms)
-    if Prodmap.has_key(map):
+    if map in Prodmap:
         m = Prodmap[map]
         sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
         sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
@@ -663,7 +669,7 @@ def add_production(f,file,line,prodname,syms):
             
     Productions.append(p)
     Prodmap[map] = p
-    if not Nonterminals.has_key(prodname):
+    if prodname not in Nonterminals:
         Nonterminals[prodname] = [ ]
     
     # Add all terminals to Terminals
@@ -687,13 +693,13 @@ def add_production(f,file,line,prodname,syms):
             del p.prod[i]
             continue
 
-        if Terminals.has_key(t):
+        if t in Terminals:
             Terminals[t].append(p.number)
             # Is a terminal.  We'll assign a precedence to p based on this
             if not hasattr(p,"prec"):
                 p.prec = Precedence.get(t,('right',0))
         else:
-            if not Nonterminals.has_key(t):
+            if t not in Nonterminals:
                 Nonterminals[t] = [ ]
             Nonterminals[t].append(p.number)
         i += 1
@@ -722,8 +728,8 @@ def add_production(f,file,line,prodname,syms):
 # and adds rules to the grammar
 
 def add_function(f):
-    line = f.func_code.co_firstlineno
-    file = f.func_code.co_filename
+    line = f.__code__.co_firstlineno
+    file = f.__code__.co_filename
     error = 0
 
     if isinstance(f,types.MethodType):
@@ -731,11 +737,11 @@ def add_function(f):
     else:
         reqdargs = 1
         
-    if f.func_code.co_argcount > reqdargs:
+    if f.__code__.co_argcount > reqdargs:
         sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
         return -1
 
-    if f.func_code.co_argcount < reqdargs:
+    if f.__code__.co_argcount < reqdargs:
         sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
         return -1
           
@@ -776,7 +782,7 @@ def add_function(f):
                 error += e
 
                 
-            except StandardError:
+            except Exception:
                 sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
                 error -= 1
     else:
@@ -793,7 +799,7 @@ def compute_reachable():
     (Unused terminals have already had their warning.)
     '''
     Reachable = { }
-    for s in Terminals.keys() + Nonterminals.keys():
+    for s in list(Terminals.keys()) + list(Nonterminals.keys()):
         Reachable[s] = 0
 
     mark_reachable_from( Productions[0].prod[0], Reachable )
@@ -872,7 +878,7 @@ def compute_terminates():
     some_error = 0
     for (s,terminates) in Terminates.items():
         if not terminates:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+            if s not in Prodnames and s not in Terminals and s != 'error':
                 # s is used-but-not-defined, and we've already warned of that,
                 # so it would be overkill to say that it's also non-terminating.
                 pass
@@ -893,7 +899,7 @@ def verify_productions(cycle_check=1):
         if not p: continue
 
         for s in p.prod:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+            if s not in Prodnames and s not in Terminals and s != 'error':
                 sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
                 error = 1
                 continue
@@ -935,12 +941,12 @@ def verify_productions(cycle_check=1):
 
     if yaccdebug:
         _vf.write("\nTerminals, with rules where they appear\n\n")
-        ks = Terminals.keys()
+        ks = list(Terminals.keys())
         ks.sort()
         for k in ks:
             _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
         _vf.write("\nNonterminals, with rules where they appear\n\n")
-        ks = Nonterminals.keys()
+        ks = list(Nonterminals.keys())
         ks.sort()
         for k in ks:
             _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
@@ -1003,7 +1009,7 @@ def add_precedence(plist):
                 sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
                 return -1
             for t in terms:
-                if Precedence.has_key(t):
+                if t in Precedence:
                     sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
                     error += 1
                     continue
@@ -1087,7 +1093,7 @@ def compute_follow(start=None):
             # Here is the production set
             for i in range(len(p.prod)):
                 B = p.prod[i]
-                if Nonterminals.has_key(B):
+                if B in Nonterminals:
                     # Okay. We got a non-terminal in a production
                     fst = first(p.prod[i+1:])
                     hasempty = 0
@@ -1259,7 +1265,7 @@ def lr0_items():
         for x in asyms.keys():
             g = lr0_goto(I,x)
             if not g:  continue
-            if _lr0_cidhash.has_key(id(g)): continue
+            if id(g) in _lr0_cidhash: continue
             _lr0_cidhash[id(g)] = len(C)            
             C.append(g)
             
@@ -1305,7 +1311,7 @@ def compute_nullable_nonterminals():
                 nullable[p.name] = 1
                 continue
            for t in p.prod:
-                if not nullable.has_key(t): break
+                if t not in nullable: break
            else:
                 nullable[p.name] = 1
        if len(nullable) == num_nullable: break
@@ -1329,7 +1335,7 @@ def find_nonterminal_transitions(C):
          for p in C[state]:
              if p.lr_index < p.len - 1:
                   t = (state,p.prod[p.lr_index+1])
-                  if Nonterminals.has_key(t[1]):
+                  if t[1] in Nonterminals:
                         if t not in trans: trans.append(t)
          state = state + 1
      return trans
@@ -1352,7 +1358,7 @@ def dr_relation(C,trans,nullable):
     for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index+1]
-           if Terminals.has_key(a):
+           if a in Terminals:
                if a not in terms: terms.append(a)
 
     # This extra bit is to handle the start state
@@ -1377,7 +1383,7 @@ def reads_relation(C, trans, empty):
     for p in g:
         if p.lr_index < p.len - 1:
              a = p.prod[p.lr_index + 1]
-             if empty.has_key(a):
+             if a in empty:
                   rel.append((j,a))
 
     return rel
@@ -1437,15 +1443,15 @@ def compute_lookback_includes(C,trans,nullable):
                  t = p.prod[lr_index]
 
                  # Check to see if this symbol and state are a non-terminal transition
-                 if dtrans.has_key((j,t)):
+                 if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the 
                        # production derives empty
 
                        li = lr_index + 1
                        while li < p.len:
-                            if Terminals.has_key(p.prod[li]): break      # No forget it
-                            if not nullable.has_key(p.prod[li]): break
+                            if p.prod[li] in Terminals: break      # No forget it
+                            if p.prod[li] not in nullable: break
                             li = li + 1
                        else:
                             # Appears to be a relation between (j,t) and (state,N)
@@ -1466,7 +1472,7 @@ def compute_lookback_includes(C,trans,nullable):
                  else:
                       lookb.append((j,r))
         for i in includes:
-             if not includedict.has_key(i): includedict[i] = []
+             if i not in includedict: includedict[i] = []
              includedict[i].append((state,N))
         lookdict[(state,N)] = lookb
 
@@ -1513,11 +1519,11 @@ def traverse(x,N,stack,F,X,R,FP):
         for a in F.get(y,[]):
             if a not in F[x]: F[x].append(a)
     if N[x] == d:
-       N[stack[-1]] = sys.maxint
+       N[stack[-1]] = sys.maxsize
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
-           N[stack[-1]] = sys.maxint
+           N[stack[-1]] = sys.maxsize
            F[stack[-1]] = F[x]
            element = stack.pop()
 
@@ -1577,7 +1583,7 @@ def add_lookaheads(lookbacks,followset):
     for trans,lb in lookbacks.items():
         # Loop over productions in lookback
         for state,p in lb:
-             if not p.lookaheads.has_key(state):
+             if state not in p.lookaheads:
                   p.lookaheads[state] = []
              f = followset.get(trans,[])
              for a in f:
@@ -1709,7 +1715,7 @@ def lr_parse_table(method):
                 else:
                     i = p.lr_index
                     a = p.prod[i+1]       # Get symbol right after the "."
-                    if Terminals.has_key(a):
+                    if a in Terminals:
                         g = lr0_goto(I,a)
                         j = _lr0_cidhash.get(id(g),-1)
                         if j >= 0:
@@ -1751,22 +1757,22 @@ def lr_parse_table(method):
                                 action[st,a] = j
                                 actionp[st,a] = p
                                 
-            except StandardError,e:
-                raise YaccError, "Hosed in lr_parse_table", e
+            except Exception as e:
+                raise YaccError("Hosed in lr_parse_table: %s" % e)
 
         # Print the actions associated with each terminal
         if yaccdebug:
           _actprint = { }
           for a,p,m in actlist:
-            if action.has_key((st,a)):
+            if (st,a) in action:
                 if p is actionp[st,a]:
                     _vf.write("    %-15s %s\n" % (a,m))
                     _actprint[(a,m)] = 1
           _vf.write("\n")
           for a,p,m in actlist:
-            if action.has_key((st,a)):
+            if (st,a) in action:
                 if p is not actionp[st,a]:
-                    if not _actprint.has_key((a,m)):
+                    if (a,m) not in _actprint:
                         _vf.write("  ! %-15s [ %s ]\n" % (a,m))
                         _actprint[(a,m)] = 1
             
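The exception-handling changes in this file track two Python 3 syntax removals: `except E, e:` became `except E as e:`, and the three-argument `raise E, msg, tb` form is gone entirely. Since `raise E(msg) from e` is in turn Python 3-only, folding the original error into the message is one way to stay importable on both interpreters. A minimal sketch of that pattern (parse_table_step is a hypothetical stand-in for the table-construction work):

    class YaccError(Exception):
        pass

    def parse_table_step():
        # Hypothetical stand-in that fails like the real table builder might.
        raise KeyError("missing production")

    try:
        try:
            parse_table_step()
        except Exception as e:
            # Works unchanged on Python 2 and 3; the original error text
            # survives in the new exception's message.
            raise YaccError("Hosed in lr_parse_table: %s" % e)
    except YaccError as err:
        print(err)  # Hosed in lr_parse_table: 'missing production'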
@@ -1776,7 +1782,7 @@ def lr_parse_table(method):
         nkeys = { }
         for ii in I:
             for s in ii.usyms:
-                if Nonterminals.has_key(s):
+                if s in Nonterminals:
                     nkeys[s] = None
         for n in nkeys.keys():
             g = lr0_goto(I,n)
@@ -1914,15 +1920,14 @@ del _lr_goto_items
         
         f.close()
 
-    except IOError,e:
-        print "Unable to create '%s'" % filename
-        print e
-        return
+    except IOError as e:
+        print("Unable to create '%s'" % filename)
+        print(e)
 
 def lr_read_tables(module=tab_module,optimize=0):
     global _lr_action, _lr_goto, _lr_productions, _lr_method
     try:
-        exec "import %s as parsetab" % module
+        # Names bound by exec() inside a function are not visible as
+        # locals on Python 3, so fetch the imported module from
+        # sys.modules instead.
+        exec("import %s" % module)
+        parsetab = sys.modules[module]
         
         if (optimize) or (Signature.digest() == parsetab._lr_signature):
             _lr_action = parsetab._lr_action
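The exec() change above needs care on Python 3: a name bound by exec() inside a function does not become a local variable, so `exec("import x as y")` followed by a reference to `y` raises NameError. Reading the freshly imported module back out of sys.modules works on both versions; importlib.import_module would be the cleaner modern spelling. A small sketch (json stands in for a generated parsetab module):

    import sys

    def load_tables(module_name):
        # The import registers the module in sys.modules even though the
        # name bound by exec() is invisible in this function's scope.
        exec("import %s" % module_name)
        return sys.modules[module_name]

    parsetab = load_tables("json")
    print(parsetab.__name__)  # json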
@@ -1938,13 +1943,13 @@ def lr_read_tables(module=tab_module,optimize=0):
 
 
 # Available instance types.  This is used when parsers are defined by a class.
-# it's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
+# In Python 3, types.InstanceType and types.ObjectType no longer exist:
+# old-style classes are gone, so fall back to plain `object`.
 
 try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
 except AttributeError:
-   _INSTANCETYPE = types.InstanceType
+   _INSTANCETYPE = object
 
 # -----------------------------------------------------------------------------
 # yacc(module)
@@ -1962,7 +1967,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
 
 
     # Add parsing method to signature
-    Signature.update(method)
+    Signature.update(util.encode_input(method))
     
     # If a "module" parameter was supplied, extract its dictionary.
     # Note: a module may in fact be an instance as well.
@@ -1977,7 +1982,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             for i in _items:
                 ldict[i[0]] = i[1]
         else:
-            raise ValueError,"Expected a module"
+            raise ValueError("Expected a module")
         
     else:
         # No module given.  We might be able to get information from the caller.
@@ -1995,7 +2000,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
     if not start:
         start = ldict.get("start",None)
     if start:
-        Signature.update(start)
+        Signature.update(util.encode_input(start))
 
     # If running in optimized mode.  We're going to
 
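Signature is a hashlib digest object, and on Python 3 hashlib's update() accepts only bytes; feeding it a str raises TypeError. That is what the util.encode_input() wrappers added in this hunk and the one above are for. A minimal illustration, repeating the helper so the snippet is self-contained:

    import hashlib
    import locale

    def encode_input(text):
        """Encode text with the preferred system encoding, UTF-8 fallback."""
        try:
            return text.encode(locale.getpreferredencoding())
        except UnicodeError:
            return text.encode('utf-8')

    sig = hashlib.md5()
    # sig.update("LALR") would raise TypeError on Python 3:
    # Unicode objects must be encoded before hashing.
    sig.update(encode_input("LALR"))
    print(sig.hexdigest())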
@@ -2023,24 +2028,24 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             tokens = ldict.get("tokens",None)
     
         if not tokens:
-            raise YaccError,"module does not define a list 'tokens'"
-        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-            raise YaccError,"tokens must be a list or tuple."
+            raise YaccError("module does not define a list 'tokens'")
+        if not (isinstance(tokens,list) or isinstance(tokens,tuple)):
+            raise YaccError("tokens must be a list or tuple.")
 
         # Check to see if a requires dictionary is defined.
         requires = ldict.get("require",None)
         if requires:
-            if not (isinstance(requires,types.DictType)):
-                raise YaccError,"require must be a dictionary."
+            if not (isinstance(requires,dict)):
+                raise YaccError("require must be a dictionary.")
 
             for r,v in requires.items():
                 try:
-                    if not (isinstance(v,types.ListType)):
+                    if not (isinstance(v,list)):
                         raise TypeError
                     v1 = [x.split(".") for x in v]
                     Requires[r] = v1
-                except StandardError:
-                    print "Invalid specification for rule '%s' in require. Expected a list of strings" % r            
+                except Exception:
+                    print("Invalid specification for rule '%s' in require. Expected a list of strings" % r)
 
         
         # Build the dictionary of terminals.  We a record a 0 in the
@@ -2048,12 +2053,12 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
         # used in the grammar
 
         if 'error' in tokens:
-            print "yacc: Illegal token 'error'.  Is a reserved word."
-            raise YaccError,"Illegal token name"
+            print("yacc: Illegal token 'error'.  Is a reserved word.")
+            raise YaccError("Illegal token name")
 
         for n in tokens:
-            if Terminals.has_key(n):
-                print "yacc: Warning. Token '%s' multiply defined." % n
+            if n in Terminals:
+                print("yacc: Warning. Token '%s' multiply defined." % n)
             Terminals[n] = [ ]
 
         Terminals['error'] = [ ]
@@ -2061,13 +2066,13 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
         # Get the precedence map (if any)
         prec = ldict.get("precedence",None)
         if prec:
-            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
-                raise YaccError,"precedence must be a list or tuple."
+            if not (isinstance(prec,list) or isinstance(prec,tuple)):
+                raise YaccError("precedence must be a list or tuple.")
             add_precedence(prec)
-            Signature.update(repr(prec))
+            Signature.update(util.encode_input(repr(prec)))
 
         for n in tokens:
-            if not Precedence.has_key(n):
+            if n not in Precedence:
                 Precedence[n] = ('right',0)         # Default, right associative, 0 precedence
 
         # Look for error handler
@@ -2078,17 +2083,17 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             elif isinstance(ef, types.MethodType):
                 ismethod = 1
             else:
-                raise YaccError,"'p_error' defined, but is not a function or method."                
-            eline = ef.func_code.co_firstlineno
-            efile = ef.func_code.co_filename
+                raise YaccError("'p_error' defined, but is not a function or method.")
+            eline = ef.__code__.co_firstlineno
+            efile = ef.__code__.co_filename
             files[efile] = None
 
-            if (ef.func_code.co_argcount != 1+ismethod):
-                raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
+            if (ef.__code__.co_argcount != 1+ismethod):
+                raise YaccError("%s:%d: p_error() requires 1 argument." % (efile,eline))
             global Errorfunc
             Errorfunc = ef
         else:
-            print "yacc: Warning. no p_error() function is defined."
+            print("yacc: Warning. no p_error() function is defined.")
             
         # Get the list of built-in functions with p_ prefix
         symbols = [ldict[f] for f in ldict.keys()
@@ -2097,27 +2102,27 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
 
         # Check for non-empty symbols
         if len(symbols) == 0:
-            raise YaccError,"no rules of the form p_rulename are defined."
+            raise YaccError("no rules of the form p_rulename are defined.")
     
         # Sort the symbols by line number
-        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+        symbols.sort(key=lambda x: x.__code__.co_firstlineno)
 
         # Add all of the symbols to the grammar
         for f in symbols:
             if (add_function(f)) < 0:
                 error += 1
             else:
-                files[f.func_code.co_filename] = None
+                files[f.__code__.co_filename] = None
 
         # Make a signature of the docstrings
         for f in symbols:
             if f.__doc__:
-                Signature.update(f.__doc__)
+                Signature.update(util.encode_input(f.__doc__))
     
         lr_init_vars()
 
         if error:
-            raise YaccError,"Unable to construct parser."
+            raise YaccError("Unable to construct parser.")
 
         if not lr_read_tables(tabmodule):
 
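The symbols.sort() rewrite above reflects another removal: Python 3's list.sort() no longer takes a comparator, only key=. When an existing comparator cannot easily be rewritten as a key function, functools.cmp_to_key (shimmed for Python 2.6 in util.py above) provides the bridge. A sketch of both spellings:

    from functools import cmp_to_key

    def line_number(f):
        return f.__code__.co_firstlineno

    # Two dummy grammar-rule functions; p_b is defined first, so it has
    # the smaller starting line number.
    def p_b(p): "b : B"
    def p_a(p): "a : A"

    symbols = [p_a, p_b]

    # Python 2 style (raises TypeError on Python 3):
    #   symbols.sort(lambda x, y: cmp(x.func_code.co_firstlineno,
    #                                 y.func_code.co_firstlineno))
    symbols.sort(key=line_number)          # preferred on both versions
    print([f.__name__ for f in symbols])   # ['p_b', 'p_a']

    # Equivalent via cmp_to_key, for comparators that resist conversion:
    symbols.sort(key=cmp_to_key(lambda x, y: line_number(x) - line_number(y)))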
@@ -2129,8 +2134,8 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             # Validate dictionary
             validate_dict(ldict)
 
-            if start and not Prodnames.has_key(start):
-                raise YaccError,"Bad starting symbol '%s'" % start
+            if start and start not in Prodnames:
+                raise YaccError("Bad starting symbol '%s'" % start)
         
             augment_grammar(start)    
             error = verify_productions(cycle_check=check_recursion)
@@ -2138,7 +2143,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
                if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
 
             if error:
-                raise YaccError,"Unable to construct parser."
+                raise YaccError("Unable to construct parser.")
             
             build_lritems()
             compute_first1()
@@ -2147,7 +2152,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             if method in ['SLR','LALR']:
                 lr_parse_table(method)
             else:
-                raise YaccError, "Unknown parsing method '%s'" % method
+                raise YaccError("Unknown parsing method '%s'" % method)
 
             if write_tables:
                 lr_write_tables(tabmodule,outputdir)        
@@ -2159,8 +2164,8 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
                     f.write("\n\n")
                     f.write(_vf.getvalue())
                     f.close()
-                except IOError,e:
-                    print "yacc: can't create '%s'" % debugfile,e
+                except IOError as e:
+                    print("yacc: can't create '%s'" % debugfile,e)
         
     # Made it here.   Create a parser object and set up its internal state.
     # Set global parse() method to bound method of parser object.
@@ -2205,5 +2210,5 @@ def yacc_cleanup():
     
 # Stub that raises an error if parsing is attempted without first calling yacc()
 def parse(*args,**kwargs):
-    raise YaccError, "yacc: No parser built with yacc()"
+    raise YaccError("yacc: No parser built with yacc()")
 
diff --git a/sepolgen-1.2.2/tests/test_access.py b/sepolgen-1.2.2/tests/test_access.py
index fec699e..d45a823 100644
--- a/sepolgen-1.2.2/tests/test_access.py
+++ b/sepolgen-1.2.2/tests/test_access.py
@@ -32,7 +32,7 @@ class TestAccessVector(unittest.TestCase):
         self.assertEqual(a.obj_class, None)
         self.assertTrue(isinstance(a.perms, refpolicy.IdSet))
         self.assertTrue(isinstance(a.audit_msgs, type([])))
-        self.assertEquals(len(a.audit_msgs), 0)
+        self.assertEqual(len(a.audit_msgs), 0)
 
         # Construction from a list
         a = access.AccessVector()
@@ -72,8 +72,10 @@ class TestAccessVector(unittest.TestCase):
         self.assertEqual(l[0], "foo")
         self.assertEqual(l[1], "bar")
         self.assertEqual(l[2], "file")
-        self.assertEqual(l[3], "read")
-        self.assertEqual(l[4], "write")
+        perms = l[3:]
+        perms.sort()
+        self.assertEqual(perms[0], "read")
+        self.assertEqual(perms[1], "write")
 
     def test_to_string(self):
         a = access.AccessVector()
@@ -82,8 +84,21 @@ class TestAccessVector(unittest.TestCase):
         a.obj_class = "file"
         a.perms.update(["read", "write"])
 
-        self.assertEquals(str(a), "allow foo bar:file { read write };")
-        self.assertEquals(a.to_string(), "allow foo bar:file { read write };")
+        first, second = str(a).split(':')
+        self.assertEqual(first, "allow foo bar")
+        second = second.split(' ')
+        second.sort()
+        expected = "file { read write };".split(' ')
+        expected.sort()
+        self.assertEqual(second, expected)
+
+        first, second = a.to_string().split(':')
+        self.assertEqual(first, "allow foo bar")
+        second = second.split(' ')
+        second.sort()
+        expected = "file { read write };".split(' ')
+        expected.sort()
+        self.assertEqual(second, expected)
 
     def test_cmp(self):
         a = access.AccessVector()
@@ -98,36 +113,38 @@ class TestAccessVector(unittest.TestCase):
         b.obj_class = "file"
         b.perms.update(["read", "write"])
 
-        self.assertEquals(a, b)
+        self.assertEqual(a, b)
 
         # Source Type
         b.src_type = "baz"
-        self.assertEquals(cmp(a, b), 1)
+        self.assertNotEqual(a, b)
+        self.assertTrue(a > b)
 
         b.src_type = "gaz"
-        self.assertEquals(cmp(a, b), -1)
+        self.assertNotEqual(a, b)
+        self.assertTrue(a < b)
 
         # Target Type
         b.src_type = "foo"
         b.tgt_type = "aar"
-        self.assertEquals(cmp(a, b), 1)
+        self.assertNotEqual(a, b)
+        self.assertTrue(a > b)
 
         b.tgt_type = "gaz"
-        self.assertEquals(cmp(a, b), -1)
+        self.assertNotEqual(a, b)
+        self.assertTrue(a < b)
 
         # Perms
         b.tgt_type = "bar"
         b.perms = refpolicy.IdSet(["read"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, 1)
+        self.assertNotEqual(a, b)
+        self.assertTrue(a > b)
 
         b.perms = refpolicy.IdSet(["read", "write", "append"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, -1)
+        self.assertNotEqual(a, b)
 
         b.perms = refpolicy.IdSet(["read", "append"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, 1)
+        self.assertNotEqual(a, b)
                          
 class TestUtilFunctions(unittest.TestCase):
     def test_is_idparam(self):
@@ -149,7 +166,7 @@ class TestUtilFunctions(unittest.TestCase):
         rule.perms.add("write")
 
         avs = access.avrule_to_access_vectors(rule)
-        self.assertEquals(len(avs), 8)
+        self.assertEqual(len(avs), 8)
         comps = [("foo", "what", "dir"),
                  ("foo", "what", "file"),
                  ("foo", "bar", "dir"),
@@ -160,15 +177,15 @@ class TestUtilFunctions(unittest.TestCase):
                  ("baz", "bar", "file")]
         status = [False] * 8
         for av in access.avrule_to_access_vectors(rule):
-            self.assertEquals(av.perms, refpolicy.IdSet(["read", "write"]))
-            for i in xrange(len(comps)):
+            self.assertEqual(av.perms, refpolicy.IdSet(["read", "write"]))
+            for i in range(len(comps)):
                 if comps[i][0] == av.src_type and \
                    comps[i][1] == av.tgt_type and \
                    comps[i][2] == av.obj_class:
                     status[i] = True
 
         for s in status:
-            self.assertEquals(s, True)
+            self.assertEqual(s, True)
                    
 
 class TestAccessVectorSet(unittest.TestCase):
@@ -203,18 +220,18 @@ class TestAccessVectorSet(unittest.TestCase):
                  ("baz", "bar", "file")]
         status = [False] * 8
         for av in self.s:
-            self.assertEquals(av.perms, refpolicy.IdSet(["read", "write"]))
-            for i in xrange(len(comps)):
+            self.assertEqual(av.perms, refpolicy.IdSet(["read", "write"]))
+            for i in range(len(comps)):
                 if comps[i][0] == av.src_type and \
                    comps[i][1] == av.tgt_type and \
                    comps[i][2] == av.obj_class:
                     status[i] = True
 
         for s in status:
-            self.assertEquals(s, True)
+            self.assertEqual(s, True)
 
     def test_len(self):
-        self.assertEquals(len(self.s), 8)
+        self.assertEqual(len(self.s), 8)
 
     def test_list(self):
         a = access.AccessVectorSet()
@@ -223,15 +240,22 @@ class TestAccessVectorSet(unittest.TestCase):
         a.add("what", "bar", "file", refpolicy.IdSet(["read", "write"]))
 
         avl = a.to_list()
+        avl.sort()
 
         test_l = [['what','bar','file','read','write'],
                   ['$1','foo','file','read','write'],
                   ['$1','bar','file','read','write']]
+        test_l.sort()
 
         for a,b in zip(test_l, avl):
             self.assertEqual(len(a), len(b))
-            for x,y in zip(a,b):
+            for x,y in list(zip(a,b))[:3]:
                 self.assertEqual(x, y)
+            perms1 = a[3:]
+            perms2 = b[3:]
+            perms1.sort()
+            perms2.sort()
+            self.assertEqual(perms1, perms2)
                 
         b = access.AccessVectorSet()
         b.from_list(avl)
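Several test rewrites above share one motive: IdSet is a set, so the order in which to_string()/to_list() emit permissions is not guaranteed and can vary between interpreter versions and hash seeds. Sorting both sides before comparing keeps the assertions deterministic. A self-contained sketch of the pattern (to_space_str here is a hypothetical stand-in for the IdSet method):

    import unittest

    def to_space_str(perms):
        # Stand-in: renders a set of permissions in arbitrary iteration order.
        return "{ %s }" % " ".join(perms)

    class TestOrderIndependent(unittest.TestCase):
        def test_rendering(self):
            rendered = to_space_str({"read", "write", "getattr"})
            # Comparing raw strings would be flaky; compare sorted tokens.
            self.assertEqual(sorted(rendered.split(' ')),
                             sorted("{ getattr read write }".split(' ')))

    if __name__ == "__main__":
        unittest.main()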
diff --git a/sepolgen-1.2.2/tests/test_audit.py b/sepolgen-1.2.2/tests/test_audit.py
index 7b74220..6379954 100644
--- a/sepolgen-1.2.2/tests/test_audit.py
+++ b/sepolgen-1.2.2/tests/test_audit.py
@@ -60,29 +60,29 @@ class TestAVCMessage(unittest.TestCase):
     def test_defs(self):
         avc = sepolgen.audit.AVCMessage(audit1)
         sc = sepolgen.refpolicy.SecurityContext()
-        self.assertEquals(avc.scontext, sc)
-        self.assertEquals(avc.tcontext, sc)
-        self.assertEquals(avc.tclass, "")
-        self.assertEquals(avc.accesses, [])
+        self.assertEqual(avc.scontext, sc)
+        self.assertEqual(avc.tcontext, sc)
+        self.assertEqual(avc.tclass, "")
+        self.assertEqual(avc.accesses, [])
 
     def test_granted(self):
         avc = sepolgen.audit.AVCMessage(granted1)
         avc.from_split_string(granted1.split())
 
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "unconfined_t")
-        self.assertEquals(avc.scontext.level, "s0")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "unconfined_t")
+        self.assertEqual(avc.scontext.level, "s0")
 
-        self.assertEquals(avc.tcontext.user, "user_u")
-        self.assertEquals(avc.tcontext.role, "object_r")
-        self.assertEquals(avc.tcontext.type, "user_home_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "user_u")
+        self.assertEqual(avc.tcontext.role, "object_r")
+        self.assertEqual(avc.tcontext.type, "user_home_t")
+        self.assertEqual(avc.tcontext.level, "s0")
         
-        self.assertEquals(avc.tclass, "file")
-        self.assertEquals(avc.accesses, ["getattr"])
+        self.assertEqual(avc.tclass, "file")
+        self.assertEqual(avc.accesses, ["getattr"])
 
-        self.assertEquals(avc.denial, False)
+        self.assertEqual(avc.denial, False)
 
 
     def test_from_split_string(self):
@@ -91,54 +91,54 @@ class TestAVCMessage(unittest.TestCase):
         recs = audit1.split()
         avc.from_split_string(recs)
 
-        self.assertEquals(avc.header, "audit(1158064002.046:4):")
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "bluetooth_helper_t")
-        self.assertEquals(avc.scontext.level, "s0-s0:c0")
+        self.assertEqual(avc.header, "audit(1158064002.046:4):")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "bluetooth_helper_t")
+        self.assertEqual(avc.scontext.level, "s0-s0:c0")
 
-        self.assertEquals(avc.tcontext.user, "system_u")
-        self.assertEquals(avc.tcontext.role, "object_r")
-        self.assertEquals(avc.tcontext.type, "xdm_tmp_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "system_u")
+        self.assertEqual(avc.tcontext.role, "object_r")
+        self.assertEqual(avc.tcontext.type, "xdm_tmp_t")
+        self.assertEqual(avc.tcontext.level, "s0")
 
-        self.assertEquals(avc.tclass, "file")
-        self.assertEquals(avc.accesses, ["read"])
+        self.assertEqual(avc.tclass, "file")
+        self.assertEqual(avc.accesses, ["read"])
 
-        self.assertEquals(avc.comm, "bluez-pin")
+        self.assertEqual(avc.comm, "bluez-pin")
 
 
-        self.assertEquals(avc.denial, True)
+        self.assertEqual(avc.denial, True)
 
         # audit daemon message
         avc = sepolgen.audit.AVCMessage(audit2)
         recs = audit2.split()
         avc.from_split_string(recs)
 
-        self.assertEquals(avc.header, "audit(1158584779.745:708):")
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "vpnc_t")
-        self.assertEquals(avc.scontext.level, "s0")
+        self.assertEqual(avc.header, "audit(1158584779.745:708):")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "vpnc_t")
+        self.assertEqual(avc.scontext.level, "s0")
 
-        self.assertEquals(avc.tcontext.user, "user_u")
-        self.assertEquals(avc.tcontext.role, "system_r")
-        self.assertEquals(avc.tcontext.type, "vpnc_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "user_u")
+        self.assertEqual(avc.tcontext.role, "system_r")
+        self.assertEqual(avc.tcontext.type, "vpnc_t")
+        self.assertEqual(avc.tcontext.level, "s0")
 
-        self.assertEquals(avc.tclass, "capability")
-        self.assertEquals(avc.accesses, ["dac_read_search"])
+        self.assertEqual(avc.tclass, "capability")
+        self.assertEqual(avc.accesses, ["dac_read_search"])
 
-        self.assertEquals(avc.comm, "sh")
+        self.assertEqual(avc.comm, "sh")
 
-        self.assertEquals(avc.denial, True)
+        self.assertEqual(avc.denial, True)
 
 class TestPathMessage(unittest.TestCase):
     def test_from_split_string(self):
         path = sepolgen.audit.PathMessage(path1)
         recs = path1.split()
         path.from_split_string(recs)
-        self.assertEquals(path.path, "/usr/lib/sa/sa1")
+        self.assertEqual(path.path, "/usr/lib/sa/sa1")
 
 # TODO - add tests for the other message types
 
@@ -149,27 +149,28 @@ class TestAuditParser(unittest.TestCase):
     def test_parse_string(self):
         a = sepolgen.audit.AuditParser()
         a.parse_string(log1)
-        self.assertEquals(len(a.avc_msgs), 11)
-        self.assertEquals(len(a.compute_sid_msgs), 0)
-        self.assertEquals(len(a.invalid_msgs), 0)
-        self.assertEquals(len(a.policy_load_msgs), 0)
-        self.assertEquals(len(a.path_msgs), 1)
+        self.assertEqual(len(a.avc_msgs), 11)
+        self.assertEqual(len(a.compute_sid_msgs), 0)
+        self.assertEqual(len(a.invalid_msgs), 0)
+        self.assertEqual(len(a.policy_load_msgs), 0)
+        self.assertEqual(len(a.path_msgs), 1)
 
     def test_post_process(self):
         a = sepolgen.audit.AuditParser()
         a.parse_string(log2)
-        self.assertEquals(len(a.avc_msgs), 2)
-        self.assertEquals(a.avc_msgs[0].path, "/usr/lib/sa/sa1")
-        self.assertEquals(a.avc_msgs[1].path, "/usr/lib/sa/sa1")
+        self.assertEqual(len(a.avc_msgs), 2)
+        self.assertEqual(a.avc_msgs[0].path, "/usr/lib/sa/sa1")
+        self.assertEqual(a.avc_msgs[1].path, "/usr/lib/sa/sa1")
 
     def test_parse_file(self):
         f = open("audit.txt")
         a = sepolgen.audit.AuditParser()
         a.parse_file(f)
-        self.assertEquals(len(a.avc_msgs), 21)
-        self.assertEquals(len(a.compute_sid_msgs), 0)
-        self.assertEquals(len(a.invalid_msgs), 0)
-        self.assertEquals(len(a.policy_load_msgs), 0)
+        f.close()
+        self.assertEqual(len(a.avc_msgs), 21)
+        self.assertEqual(len(a.compute_sid_msgs), 0)
+        self.assertEqual(len(a.invalid_msgs), 0)
+        self.assertEqual(len(a.policy_load_msgs), 0)
 
 class TestGeneration(unittest.TestCase):
     def test_generation(self):
diff --git a/sepolgen-1.2.2/tests/test_interfaces.py b/sepolgen-1.2.2/tests/test_interfaces.py
index b589bdf..a55f7db 100644
--- a/sepolgen-1.2.2/tests/test_interfaces.py
+++ b/sepolgen-1.2.2/tests/test_interfaces.py
@@ -202,11 +202,11 @@ class TestInterfaceSet(unittest.TestCase):
         i = interfaces.InterfaceSet()
         i.add_headers(h)
 
-        self.assertEquals(len(i.interfaces), 1)
+        self.assertEqual(len(i.interfaces), 1)
         for key, interface in i.interfaces.items():
-            self.assertEquals(key, interface.name)
-            self.assertEquals(key, "foo")
-            self.assertEquals(len(interface.access), 2)
+            self.assertEqual(key, interface.name)
+            self.assertEqual(key, "foo")
+            self.assertEqual(len(interface.access), 2)
 
             # Check the access vectors
             comp_avs = [["$1", "usr_t", "dir", "create", "add_name"],
@@ -215,21 +215,21 @@ class TestInterfaceSet(unittest.TestCase):
             self.assertTrue(ret)
 
             # Check the params
-            self.assertEquals(len(interface.params), 1)
+            self.assertEqual(len(interface.params), 1)
             for param in interface.params.values():
-                self.assertEquals(param.type, refpolicy.SRC_TYPE)
-                self.assertEquals(param.name, "$1")
-                self.assertEquals(param.num, 1)
-                self.assertEquals(param.required, True)
+                self.assertEqual(param.type, refpolicy.SRC_TYPE)
+                self.assertEqual(param.name, "$1")
+                self.assertEqual(param.num, 1)
+                self.assertEqual(param.required, True)
 
     def test_expansion(self):
         h = refparser.parse(test_expansion)
         i = interfaces.InterfaceSet()
         i.add_headers(h)
 
-        self.assertEquals(len(i.interfaces), 3)
+        self.assertEqual(len(i.interfaces), 3)
         for key, interface in i.interfaces.items():
-            self.assertEquals(key, interface.name)
+            self.assertEqual(key, interface.name)
             if key == "foo":
                 comp_avs = [["$1", "usr_t", "dir", "create", "add_name"],
                             ["$1", "usr_t", "file", "read", "write"]]
@@ -268,6 +268,7 @@ class TestInterfaceSet(unittest.TestCase):
         i2 = interfaces.InterfaceSet()
         f = open("output")
         i2.from_file(f)
+        f.close()
         if_status = [False, False, False]
         for ifv in i2.interfaces.values():
             if ifv.name == "files_search_usr":
@@ -277,6 +278,6 @@ class TestInterfaceSet(unittest.TestCase):
             if ifv.name == "files_exec_usr_files":
                 if_status[2] = True
 
-        self.assertEquals(if_status[0], True)
-        self.assertEquals(if_status[1], True)
-        self.assertEquals(if_status[2], True)
+        self.assertEqual(if_status[0], True)
+        self.assertEqual(if_status[1], True)
+        self.assertEqual(if_status[2], True)
diff --git a/sepolgen-1.2.2/tests/test_matching.py b/sepolgen-1.2.2/tests/test_matching.py
index 161e001..3ecb80b 100644
--- a/sepolgen-1.2.2/tests/test_matching.py
+++ b/sepolgen-1.2.2/tests/test_matching.py
@@ -33,15 +33,15 @@ class TestMatch(unittest.TestCase):
         b.dist = 100
         b.info_dir_change = True
 
-        self.assertEquals(a, b)
+        self.assertEqual(a, b)
         b.info_dir_change = False
-        self.assertEquals(cmp(a, b), 1)
-        self.assertEquals(cmp(b, a), -1)
+        self.assertTrue(a > b)
+        self.assertTrue(b < a)
 
         b.dist = 200
 
-        self.assertEquals(cmp(a, b), -1)
-        self.assertEquals(cmp(b, a), 1)
+        self.assertTrue(a < b)
+        self.assertTrue(b > a)
 
 class TestMatchList(unittest.TestCase):
     def test_append(self):
@@ -90,7 +90,7 @@ class TestMatchList(unittest.TestCase):
         for x, y in zip(l, ml):
             self.assertEqual(x, y)
 
-        self.assertEquals(ml.best(), c)
+        self.assertEqual(ml.best(), c)
 
 
 test_expansion = """
diff --git a/sepolgen-1.2.2/tests/test_objectmodel.py b/sepolgen-1.2.2/tests/test_objectmodel.py
index 3db241c..b503672 100644
--- a/sepolgen-1.2.2/tests/test_objectmodel.py
+++ b/sepolgen-1.2.2/tests/test_objectmodel.py
@@ -25,20 +25,21 @@ class TestInfoFlow(unittest.TestCase):
         info = sepolgen.objectmodel.PermMappings()
         fd = open("perm_map")
         info.from_file(fd)
+        fd.close()
 
         pm = info.get("filesystem", "mount")
-        self.assertEquals(pm.perm, "mount")
-        self.assertEquals(pm.dir, sepolgen.objectmodel.FLOW_WRITE)
-        self.assertEquals(pm.weight, 1)
+        self.assertEqual(pm.perm, "mount")
+        self.assertEqual(pm.dir, sepolgen.objectmodel.FLOW_WRITE)
+        self.assertEqual(pm.weight, 1)
 
         self.assertRaises(KeyError, info.get, "filesystem", "foo")
 
         pm = info.getdefault("filesystem", "foo")
-        self.assertEquals(pm.perm, "foo")
-        self.assertEquals(pm.dir, sepolgen.objectmodel.FLOW_BOTH)
-        self.assertEquals(pm.weight, 5)
+        self.assertEqual(pm.perm, "foo")
+        self.assertEqual(pm.dir, sepolgen.objectmodel.FLOW_BOTH)
+        self.assertEqual(pm.weight, 5)
 
         pm = info.getdefault("foo", "bar")
-        self.assertEquals(pm.perm, "bar")
-        self.assertEquals(pm.dir, sepolgen.objectmodel.FLOW_BOTH)
-        self.assertEquals(pm.weight, 5)
+        self.assertEqual(pm.perm, "bar")
+        self.assertEqual(pm.dir, sepolgen.objectmodel.FLOW_BOTH)
+        self.assertEqual(pm.weight, 5)
diff --git a/sepolgen-1.2.2/tests/test_refparser.py b/sepolgen-1.2.2/tests/test_refparser.py
index 3fe6d79..d7db145 100644
--- a/sepolgen-1.2.2/tests/test_refparser.py
+++ b/sepolgen-1.2.2/tests/test_refparser.py
@@ -107,12 +107,12 @@ class TestParser(unittest.TestCase):
         h = refparser.parse(interface_example)
         #print ""
         #refpolicy.print_tree(h)
-        #self.assertEquals(len(h.interfaces), 3)
+        #self.assertEqual(len(h.interfaces), 3)
 
         name = "files_search_usr"
         #i = h.interfaces[name]
-        #self.assertEquals(i.name, name)
-        #self.assertEquals(len(i.rules), 1)
+        #self.assertEqual(i.name, name)
+        #self.assertEqual(len(i.rules), 1)
         #rule = i.rules[0]
         #self.assertTrue(isinstance(rule, refpolicy.AVRule))
         
diff --git a/sepolgen-1.2.2/tests/test_refpolicy.py b/sepolgen-1.2.2/tests/test_refpolicy.py
index 8c87189..16e6680 100644
--- a/sepolgen-1.2.2/tests/test_refpolicy.py
+++ b/sepolgen-1.2.2/tests/test_refpolicy.py
@@ -24,10 +24,14 @@ import selinux
 class TestIdSet(unittest.TestCase):
     def test_set_to_str(self):
         s = refpolicy.IdSet(["read", "write", "getattr"])
-        self.assertEquals(s.to_space_str(), "{ read write getattr }")
+        s = s.to_space_str().split(' ')
+        s.sort()
+        expected = "{ read write getattr }".split(' ')
+        expected.sort()
+        self.assertEqual(s, expected)
         s = refpolicy.IdSet()
         s.add("read")
-        self.assertEquals(s.to_space_str(), "read")
+        self.assertEqual(s.to_space_str(), "read")
 
 class TestSecurityContext(unittest.TestCase):
     def test_init(self):
@@ -38,25 +42,25 @@ class TestSecurityContext(unittest.TestCase):
         context = "user_u:object_r:foo_t"
         sc = refpolicy.SecurityContext()
         sc.from_string(context)
-        self.assertEquals(sc.user, "user_u")
-        self.assertEquals(sc.role, "object_r")
-        self.assertEquals(sc.type, "foo_t")
-        self.assertEquals(sc.level, None)
+        self.assertEqual(sc.user, "user_u")
+        self.assertEqual(sc.role, "object_r")
+        self.assertEqual(sc.type, "foo_t")
+        self.assertEqual(sc.level, None)
         if selinux.is_selinux_mls_enabled():
-            self.assertEquals(str(sc), context + ":s0")
+            self.assertEqual(str(sc), context + ":s0")
         else:
-            self.assertEquals(str(sc), context)
-        self.assertEquals(sc.to_string(default_level="s1"), context + ":s1")
+            self.assertEqual(str(sc), context)
+        self.assertEqual(sc.to_string(default_level="s1"), context + ":s1")
 
         context = "user_u:object_r:foo_t:s0-s0:c0-c255"
         sc = refpolicy.SecurityContext()
         sc.from_string(context)
-        self.assertEquals(sc.user, "user_u")
-        self.assertEquals(sc.role, "object_r")
-        self.assertEquals(sc.type, "foo_t")
-        self.assertEquals(sc.level, "s0-s0:c0-c255")
-        self.assertEquals(str(sc), context)
-        self.assertEquals(sc.to_string(), context)
+        self.assertEqual(sc.user, "user_u")
+        self.assertEqual(sc.role, "object_r")
+        self.assertEqual(sc.type, "foo_t")
+        self.assertEqual(sc.level, "s0-s0:c0-c255")
+        self.assertEqual(str(sc), context)
+        self.assertEqual(sc.to_string(), context)
 
         sc = refpolicy.SecurityContext()
         self.assertRaises(ValueError, sc.from_string, "abc")
@@ -67,20 +71,20 @@ class TestSecurityContext(unittest.TestCase):
         sc3 = refpolicy.SecurityContext("user_u:object_r:foo_t:s0")
         sc4 = refpolicy.SecurityContext("user_u:object_r:bar_t")
 
-        self.assertEquals(sc1, sc2)
-        self.assertNotEquals(sc1, sc3)
-        self.assertNotEquals(sc1, sc4)
+        self.assertEqual(sc1, sc2)
+        self.assertNotEqual(sc1, sc3)
+        self.assertNotEqual(sc1, sc4)
 
 class TestObjecClass(unittest.TestCase):
     def test_init(self):
         o = refpolicy.ObjectClass(name="file")
-        self.assertEquals(o.name, "file")
+        self.assertEqual(o.name, "file")
         self.assertTrue(isinstance(o.perms, set))
 
 class TestAVRule(unittest.TestCase):
     def test_init(self):
         a = refpolicy.AVRule()
-        self.assertEquals(a.rule_type, a.ALLOW)
+        self.assertEqual(a.rule_type, a.ALLOW)
         self.assertTrue(isinstance(a.src_types, set))
         self.assertTrue(isinstance(a.tgt_types, set))
         self.assertTrue(isinstance(a.obj_classes, set))
@@ -92,7 +96,7 @@ class TestAVRule(unittest.TestCase):
         a.tgt_types.add("bar_t")
         a.obj_classes.add("file")
         a.perms.add("read")
-        self.assertEquals(a.to_string(), "allow foo_t bar_t:file read;")
+        self.assertEqual(a.to_string(), "allow foo_t bar_t:file read;")
 
         a.rule_type = a.DONTAUDIT
         a.src_types.add("user_t")
@@ -100,17 +104,20 @@ class TestAVRule(unittest.TestCase):
         a.obj_classes.add("lnk_file")
         a.perms.add("write")
         # This test might need to go because set ordering is not guaranteed
-        self.assertEquals(a.to_string(),
-                          "dontaudit { foo_t user_t } { user_home_t bar_t }:{ lnk_file file } { read write };")
+        a = a.to_string().split(' ')
+        a.sort()
+        b = "dontaudit { foo_t user_t } { user_home_t bar_t }:{ lnk_file file } { read write };".split(' ')
+        b.sort()
+        self.assertEqual(a, b)
 
 class TestTypeRule(unittest.TestCase):
     def test_init(self):
         a = refpolicy.TypeRule()
-        self.assertEquals(a.rule_type, a.TYPE_TRANSITION)
+        self.assertEqual(a.rule_type, a.TYPE_TRANSITION)
         self.assertTrue(isinstance(a.src_types, set))
         self.assertTrue(isinstance(a.tgt_types, set))
         self.assertTrue(isinstance(a.obj_classes, set))
-        self.assertEquals(a.dest_type, "")
+        self.assertEqual(a.dest_type, "")
 
     def test_to_string(self):
         a = refpolicy.TypeRule()
@@ -118,7 +125,7 @@ class TestTypeRule(unittest.TestCase):
         a.tgt_types.add("bar_exec_t")
         a.obj_classes.add("process")
         a.dest_type = "bar_t"
-        self.assertEquals(a.to_string(), "type_transition foo_t bar_exec_t:process bar_t;")
+        self.assertEqual(a.to_string(), "type_transition foo_t bar_exec_t:process bar_t;")
 
 
 class TestParseNode(unittest.TestCase):