Dataset schema (from the dataset viewer):

  repo_id            string (208 distinct values)
  file_path          string (length 31 to 190)
  content            string (length 1 to 2.65M)
  __index_level_0__  int64 (constant 0)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/spawn.py
"""distutils.spawn Provides the 'spawn()' function, a front-end to various platform- specific functions for launching another program in a sub-process. Also provides the 'find_executable()' to search the path for a given executable name. """ import sys import os import subprocess from distutils.errors import DistutilsPlatformError, DistutilsExecError from distutils.debug import DEBUG from distutils import log if sys.platform == 'darwin': _cfg_target = None _cfg_target_split = None def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): """Run another program, specified as a command list 'cmd', in a new process. 'cmd' is just the argument list for the new process, ie. cmd[0] is the program to run and cmd[1:] are the rest of its arguments. There is no way to run a program with a name different from that of its executable. If 'search_path' is true (the default), the system's executable search path will be used to find the program; otherwise, cmd[0] must be the exact path to the executable. If 'dry_run' is true, the command will not actually be run. Raise DistutilsExecError if running the program fails in any way; just return on success. """ # cmd is documented as a list, but just in case some code passes a tuple # in, protect our %-formatting code against horrible death cmd = list(cmd) log.info(' '.join(cmd)) if dry_run: return if search_path: executable = find_executable(cmd[0]) if executable is not None: cmd[0] = executable env = env if env is not None else dict(os.environ) if sys.platform == 'darwin': global _cfg_target, _cfg_target_split if _cfg_target is None: from distutils import sysconfig _cfg_target = sysconfig.get_config_var( 'MACOSX_DEPLOYMENT_TARGET') or '' if _cfg_target: _cfg_target_split = [int(x) for x in _cfg_target.split('.')] if _cfg_target: # ensure that the deployment target of build process is not less # than that used when the interpreter was built. This ensures # extension modules are built with correct compatibility values cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target) if _cfg_target_split > [int(x) for x in cur_target.split('.')]: my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: ' 'now "%s" but "%s" during configure' % (cur_target, _cfg_target)) raise DistutilsPlatformError(my_msg) env.update(MACOSX_DEPLOYMENT_TARGET=cur_target) try: proc = subprocess.Popen(cmd, env=env) proc.wait() exitcode = proc.returncode except OSError as exc: if not DEBUG: cmd = cmd[0] raise DistutilsExecError( "command %r failed: %s" % (cmd, exc.args[-1])) from exc if exitcode: if not DEBUG: cmd = cmd[0] raise DistutilsExecError( "command %r failed with exit code %s" % (cmd, exitcode)) def find_executable(executable, path=None): """Tries to find 'executable' in the directories listed in 'path'. A string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']. Returns the complete filename or None if not found. 
""" _, ext = os.path.splitext(executable) if (sys.platform == 'win32') and (ext != '.exe'): executable = executable + '.exe' if os.path.isfile(executable): return executable if path is None: path = os.environ.get('PATH', None) if path is None: try: path = os.confstr("CS_PATH") except (AttributeError, ValueError): # os.confstr() or CS_PATH is not available path = os.defpath # bpo-35755: Don't use os.defpath if the PATH environment variable is # set to an empty string # PATH='' doesn't match, whereas PATH=':' looks in the current directory if not path: return None paths = path.split(os.pathsep) for p in paths: f = os.path.join(p, executable) if os.path.isfile(f): # the file exists, we have a shot at spawn working return f return None
0
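Not part of the dataset row above: a minimal usage sketch of the two public helpers in spawn.py. The command being launched is an illustrative assumption, not something the source prescribes.

# Usage sketch for spawn.py (assumes distutils is importable; the example
# command is hypothetical).
from distutils.spawn import spawn, find_executable

# Locate an executable on PATH; returns None when nothing matches.
python_path = find_executable("python")

if python_path is not None:
    # Run it in a sub-process; raises DistutilsExecError on failure.
    spawn([python_path, "--version"])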
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/text_file.py
"""text_file provides the TextFile class, which gives an interface to text files that (optionally) takes care of stripping comments, ignoring blank lines, and joining lines with backslashes.""" import sys, io class TextFile: """Provides a file-like object that takes care of all the things you commonly want to do when processing a text file that has some line-by-line syntax: strip comments (as long as "#" is your comment character), skip blank lines, join adjacent lines by escaping the newline (ie. backslash at end of line), strip leading and/or trailing whitespace. All of these are optional and independently controllable. Provides a 'warn()' method so you can generate warning messages that report physical line number, even if the logical line in question spans multiple physical lines. Also provides 'unreadline()' for implementing line-at-a-time lookahead. Constructor is called as: TextFile (filename=None, file=None, **options) It bombs (RuntimeError) if both 'filename' and 'file' are None; 'filename' should be a string, and 'file' a file object (or something that provides 'readline()' and 'close()' methods). It is recommended that you supply at least 'filename', so that TextFile can include it in warning messages. If 'file' is not supplied, TextFile creates its own using 'io.open()'. The options are all boolean, and affect the value returned by 'readline()': strip_comments [default: true] strip from "#" to end-of-line, as well as any whitespace leading up to the "#" -- unless it is escaped by a backslash lstrip_ws [default: false] strip leading whitespace from each line before returning it rstrip_ws [default: true] strip trailing whitespace (including line terminator!) from each line before returning it skip_blanks [default: true} skip lines that are empty *after* stripping comments and whitespace. (If both lstrip_ws and rstrip_ws are false, then some lines may consist of solely whitespace: these will *not* be skipped, even if 'skip_blanks' is true.) join_lines [default: false] if a backslash is the last non-newline character on a line after stripping comments and whitespace, join the following line to it to form one "logical line"; if N consecutive lines end with a backslash, then N+1 physical lines will be joined to form one logical line. collapse_join [default: false] strip leading whitespace from lines that are joined to their predecessor; only matters if (join_lines and not lstrip_ws) errors [default: 'strict'] error handler used to decode the file content Note that since 'rstrip_ws' can strip the trailing newline, the semantics of 'readline()' must differ from those of the builtin file object's 'readline()' method! In particular, 'readline()' returns None for end-of-file: an empty string might just be a blank line (or an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is not.""" default_options = { 'strip_comments': 1, 'skip_blanks': 1, 'lstrip_ws': 0, 'rstrip_ws': 1, 'join_lines': 0, 'collapse_join': 0, 'errors': 'strict', } def __init__(self, filename=None, file=None, **options): """Construct a new TextFile object. At least one of 'filename' (a string) and 'file' (a file-like object) must be supplied. 
They keyword argument options are described above and affect the values returned by 'readline()'.""" if filename is None and file is None: raise RuntimeError("you must supply either or both of 'filename' and 'file'") # set values for all options -- either from client option hash # or fallback to default_options for opt in self.default_options.keys(): if opt in options: setattr(self, opt, options[opt]) else: setattr(self, opt, self.default_options[opt]) # sanity check client option hash for opt in options.keys(): if opt not in self.default_options: raise KeyError("invalid TextFile option '%s'" % opt) if file is None: self.open(filename) else: self.filename = filename self.file = file self.current_line = 0 # assuming that file is at BOF! # 'linebuf' is a stack of lines that will be emptied before we # actually read from the file; it's only populated by an # 'unreadline()' operation self.linebuf = [] def open(self, filename): """Open a new file named 'filename'. This overrides both the 'filename' and 'file' arguments to the constructor.""" self.filename = filename self.file = io.open(self.filename, 'r', errors=self.errors) self.current_line = 0 def close(self): """Close the current file and forget everything we know about it (filename, current line number).""" file = self.file self.file = None self.filename = None self.current_line = None file.close() def gen_error(self, msg, line=None): outmsg = [] if line is None: line = self.current_line outmsg.append(self.filename + ", ") if isinstance(line, (list, tuple)): outmsg.append("lines %d-%d: " % tuple(line)) else: outmsg.append("line %d: " % line) outmsg.append(str(msg)) return "".join(outmsg) def error(self, msg, line=None): raise ValueError("error: " + self.gen_error(msg, line)) def warn(self, msg, line=None): """Print (to stderr) a warning message tied to the current logical line in the current file. If the current logical line in the file spans multiple physical lines, the warning refers to the whole range, eg. "lines 3-5". If 'line' supplied, it overrides the current line number; it may be a list or tuple to indicate a range of physical lines, or an integer for a single physical line.""" sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n") def readline(self): """Read and return a single logical line from the current file (or from an internal buffer if lines have previously been "unread" with 'unreadline()'). If the 'join_lines' option is true, this may involve reading multiple physical lines concatenated into a single string. Updates the current line number, so calling 'warn()' after 'readline()' emits a warning about the physical line(s) just read. Returns None on end-of-file, since the empty string can occur if 'rstrip_ws' is true but 'strip_blanks' is not.""" # If any "unread" lines waiting in 'linebuf', return the top # one. (We don't actually buffer read-ahead data -- lines only # get put in 'linebuf' if the client explicitly does an # 'unreadline()'. if self.linebuf: line = self.linebuf[-1] del self.linebuf[-1] return line buildup_line = '' while True: # read the line, make it None if EOF line = self.file.readline() if line == '': line = None if self.strip_comments and line: # Look for the first "#" in the line. If none, never # mind. If we find one and it's the first character, or # is not preceded by "\", then it starts a comment -- # strip the comment, strip whitespace before it, and # carry on. 
Otherwise, it's just an escaped "#", so # unescape it (and any other escaped "#"'s that might be # lurking in there) and otherwise leave the line alone. pos = line.find("#") if pos == -1: # no "#" -- no comments pass # It's definitely a comment -- either "#" is the first # character, or it's elsewhere and unescaped. elif pos == 0 or line[pos-1] != "\\": # Have to preserve the trailing newline, because it's # the job of a later step (rstrip_ws) to remove it -- # and if rstrip_ws is false, we'd better preserve it! # (NB. this means that if the final line is all comment # and has no trailing newline, we will think that it's # EOF; I think that's OK.) eol = (line[-1] == '\n') and '\n' or '' line = line[0:pos] + eol # If all that's left is whitespace, then skip line # *now*, before we try to join it to 'buildup_line' -- # that way constructs like # hello \\ # # comment that should be ignored # there # result in "hello there". if line.strip() == "": continue else: # it's an escaped "#" line = line.replace("\\#", "#") # did previous line end with a backslash? then accumulate if self.join_lines and buildup_line: # oops: end of file if line is None: self.warn("continuation line immediately precedes " "end-of-file") return buildup_line if self.collapse_join: line = line.lstrip() line = buildup_line + line # careful: pay attention to line number when incrementing it if isinstance(self.current_line, list): self.current_line[1] = self.current_line[1] + 1 else: self.current_line = [self.current_line, self.current_line + 1] # just an ordinary line, read it as usual else: if line is None: # eof return None # still have to be careful about incrementing the line number! if isinstance(self.current_line, list): self.current_line = self.current_line[1] + 1 else: self.current_line = self.current_line + 1 # strip whitespace however the client wants (leading and # trailing, or one or the other, or neither) if self.lstrip_ws and self.rstrip_ws: line = line.strip() elif self.lstrip_ws: line = line.lstrip() elif self.rstrip_ws: line = line.rstrip() # blank line (whether we rstrip'ed or not)? skip to next line # if appropriate if (line == '' or line == '\n') and self.skip_blanks: continue if self.join_lines: if line[-1] == '\\': buildup_line = line[:-1] continue if line[-2:] == '\\\n': buildup_line = line[0:-2] + '\n' continue # well, I guess there's some actual content there: return it return line def readlines(self): """Read and return the list of all logical lines remaining in the current file.""" lines = [] while True: line = self.readline() if line is None: return lines lines.append(line) def unreadline(self, line): """Push 'line' (a string) onto an internal buffer that will be checked by future 'readline()' calls. Handy for implementing a parser with line-at-a-time lookahead.""" self.linebuf.append(line)
0
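Not part of the dataset row above: a usage sketch showing how TextFile's comment stripping and backslash joining combine. The file name and its contents are hypothetical.

# Usage sketch for TextFile (file name and contents are hypothetical).
from distutils.text_file import TextFile

with open("options.txt", "w") as f:
    f.write("alpha # a comment\n"
            "beta \\\n"
            "gamma\n")

tf = TextFile("options.txt", strip_comments=1, skip_blanks=1, join_lines=1)
print(tf.readlines())   # -> ['alpha', 'beta gamma']
tf.close()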
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/msvccompiler.py
"""distutils.msvccompiler Contains MSVCCompiler, an implementation of the abstract CCompiler class for the Microsoft Visual Studio. """ # Written by Perry Stoll # hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) import sys, os from distutils.errors import \ DistutilsExecError, DistutilsPlatformError, \ CompileError, LibError, LinkError from distutils.ccompiler import \ CCompiler, gen_lib_options from distutils import log _can_read_reg = False try: import winreg _can_read_reg = True hkey_mod = winreg RegOpenKeyEx = winreg.OpenKeyEx RegEnumKey = winreg.EnumKey RegEnumValue = winreg.EnumValue RegError = winreg.error except ImportError: try: import win32api import win32con _can_read_reg = True hkey_mod = win32con RegOpenKeyEx = win32api.RegOpenKeyEx RegEnumKey = win32api.RegEnumKey RegEnumValue = win32api.RegEnumValue RegError = win32api.error except ImportError: log.info("Warning: Can't read registry to find the " "necessary compiler setting\n" "Make sure that Python modules winreg, " "win32api or win32con are installed.") pass if _can_read_reg: HKEYS = (hkey_mod.HKEY_USERS, hkey_mod.HKEY_CURRENT_USER, hkey_mod.HKEY_LOCAL_MACHINE, hkey_mod.HKEY_CLASSES_ROOT) def read_keys(base, key): """Return list of registry keys.""" try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 while True: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) i += 1 return L def read_values(base, key): """Return dict of registry keys and values. All names are converted to lowercase. """ try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while True: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[convert_mbcs(name)] = convert_mbcs(value) i += 1 return d def convert_mbcs(s): dec = getattr(s, "decode", None) if dec is not None: try: s = dec("mbcs") except UnicodeError: pass return s class MacroExpander: def __init__(self, version): self.macros = {} self.load_macros(version) def set_macro(self, macro, path, key): for base in HKEYS: d = read_values(base, path) if d: self.macros["$(%s)" % macro] = d[key] break def load_macros(self, version): vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir") self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir") net = r"Software\Microsoft\.NETFramework" self.set_macro("FrameworkDir", net, "installroot") try: if version > 7.0: self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") else: self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") except KeyError as exc: # raise DistutilsPlatformError( """Python was built with Visual Studio 2003; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2003 was not found on this system. If you have Cygwin installed, you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") p = r"Software\Microsoft\NET Framework Setup\Product" for base in HKEYS: try: h = RegOpenKeyEx(base, p) except RegError: continue key = RegEnumKey(h, 0) d = read_values(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): for k, v in self.macros.items(): s = s.replace(k, v) return s def get_build_version(): """Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ prefix = "MSC v." 
i = sys.version.find(prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 if majorVersion >= 13: # v13 was skipped and should be v14 majorVersion += 1 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion # else we don't know what version of the compiler this is return None def get_build_architecture(): """Return the processor architecture. Possible results are "Intel" or "AMD64". """ prefix = " bit (" i = sys.version.find(prefix) if i == -1: return "Intel" j = sys.version.find(")", i) return sys.version[i+len(prefix):j] def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. The current order of paths is maintained. """ # Paths are normalized so things like: /a and /a/ aren't both preserved. reduced_paths = [] for p in paths: np = os.path.normpath(p) # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. if np not in reduced_paths: reduced_paths.append(np) return reduced_paths class MSVCCompiler(CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, as defined by the CCompiler abstract class.""" compiler_type = 'msvc' # Just set this so CCompiler's constructor doesn't barf. We currently # don't use the 'set_executables()' bureaucracy provided by CCompiler, # as it really isn't necessary for this sort of single-compiler class. # Would be nice to have a consistent interface with UnixCCompiler, # though, so it's worth thinking about. executables = {} # Private class data (need to distinguish C from C++ source for compiler) _c_extensions = ['.c'] _cpp_extensions = ['.cc', '.cpp', '.cxx'] _rc_extensions = ['.rc'] _mc_extensions = ['.mc'] # Needed for the filename generation methods provided by the # base class, CCompiler. src_extensions = (_c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' shared_lib_extension = '.dll' static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' def __init__(self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) self.__version = get_build_version() self.__arch = get_build_architecture() if self.__arch == "Intel": # x86 if self.__version >= 7: self.__root = r"Software\Microsoft\VisualStudio" self.__macros = MacroExpander(self.__version) else: self.__root = r"Software\Microsoft\Devstudio" self.__product = "Visual Studio version %s" % self.__version else: # Win64. Assume this was built with the platform SDK self.__product = "Microsoft SDK compiler %s" % (self.__version + 6) self.initialized = False def initialize(self): self.__paths = [] if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): # Assume that the SDK set up everything alright; don't try to be # smarter self.cc = "cl.exe" self.linker = "link.exe" self.lib = "lib.exe" self.rc = "rc.exe" self.mc = "mc.exe" else: self.__paths = self.get_msvc_paths("path") if len(self.__paths) == 0: raise DistutilsPlatformError("Python was built with %s, " "and extensions need to be built with the same " "version of the compiler, but it isn't installed." 
% self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") self.rc = self.find_exe("rc.exe") # resource compiler self.mc = self.find_exe("mc.exe") # message compiler self.set_path_env_var('lib') self.set_path_env_var('include') # extend the MSVC path with the current path try: for p in os.environ['path'].split(';'): self.__paths.append(p) except KeyError: pass self.__paths = normalize_and_reduce_paths(self.__paths) os.environ['path'] = ";".join(self.__paths) self.preprocess_options = None if self.__arch == "Intel": self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', '/Z7', '/D_DEBUG'] else: # Win64 self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' ] else: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' ] self.ldflags_static = [ '/nologo'] self.initialized = True # -- Worker methods ------------------------------------------------ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: (base, ext) = os.path.splitext (src_name) base = os.path.splitdrive(base)[1] # Chop off the drive base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: # Better to raise an exception instead of silently continuing # and later complain about sources and targets having # different lengths raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: base = os.path.basename (base) if ext in self._rc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) elif ext in self._mc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() compile_info = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: compile_opts.extend(self.compile_options) for obj in objects: try: src, ext = build[obj] except KeyError: continue if debug: # pass the full pathname to MSVC in debug mode, # this allows the debugger to find the source file # without asking the user to browse for it src = os.path.abspath(src) if ext in self._c_extensions: input_opt = "/Tc" + src elif ext in self._cpp_extensions: input_opt = "/Tp" + src elif ext in self._rc_extensions: # compile .RC to .RES file input_opt = src output_opt = "/fo" + obj try: self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt]) except DistutilsExecError as msg: raise CompileError(msg) continue elif ext in self._mc_extensions: # Compile .MC to .RC file to .RES file. 
# * '-h dir' specifies the directory for the # generated include file # * '-r dir' specifies the target directory of the # generated RC file and the binary message resource # it includes # # For now (since there are no options to change this), # we use the source-directory for the include file and # the build directory for the RC file and message # resources. This works at least for win32all. h_dir = os.path.dirname(src) rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) base, _ = os.path.splitext (os.path.basename (src)) rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file self.spawn([self.rc] + ["/fo" + obj] + [rc_file]) except DistutilsExecError as msg: raise CompileError(msg) continue else: # how to handle this file? raise CompileError("Don't know how to compile %s to %s" % (src, obj)) output_opt = "/Fo" + obj try: self.spawn([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) return objects def create_static_lib(self, objects, output_libname, output_dir=None, debug=0, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) output_filename = self.library_filename(output_libname, output_dir=output_dir) if self._need_link(objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: pass # XXX what goes here? try: self.spawn([self.lib] + lib_args) except DistutilsExecError as msg: raise LibError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args(objects, output_dir) fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) (libraries, library_dirs, runtime_library_dirs) = fixed_args if runtime_library_dirs: self.warn ("I don't know what to do with 'runtime_library_dirs': " + str (runtime_library_dirs)) lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) if self._need_link(objects, output_filename): if target_desc == CCompiler.EXECUTABLE: if debug: ldflags = self.ldflags_shared_debug[1:] else: ldflags = self.ldflags_shared[1:] else: if debug: ldflags = self.ldflags_shared_debug else: ldflags = self.ldflags_shared export_opts = [] for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) ld_args = (ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]) # The MSVC linker generates .lib and .exp files, which cannot be # suppressed by any linker switches. The .lib files may even be # needed! Make sure they are generated in the temporary build # directory. Since they have different names for debug and release # builds, they can go into the same directory. 
if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( os.path.basename(output_filename)) implib_file = os.path.join( os.path.dirname(objects[0]), self.library_filename(dll_name)) ld_args.append ('/IMPLIB:' + implib_file) if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) try: self.spawn([self.linker] + ld_args) except DistutilsExecError as msg: raise LinkError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. def library_dir_option(self, dir): return "/LIBPATH:" + dir def runtime_library_dir_option(self, dir): raise DistutilsPlatformError( "don't know how to set runtime library search path for MSVC++") def library_option(self, lib): return self.library_filename(lib) def find_library_file(self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one. if debug: try_names = [lib + "_d", lib] else: try_names = [lib] for dir in dirs: for name in try_names: libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: # Oops, didn't find it in *any* of 'dirs' return None # Helper methods for using the MSVC registry settings def find_exe(self, exe): """Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'. """ for p in self.__paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn # didn't find it; try existing path for p in os.environ['Path'].split(';'): fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn return exe def get_msvc_paths(self, path, platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings. The list will be empty if unable to access the registry or appropriate registry keys not found. """ if not _can_read_reg: return [] path = path + " dirs" if self.__version >= 7: key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories" % (self.__root, self.__version)) else: key = (r"%s\6.0\Build System\Components\Platforms" r"\Win32 (%s)\Directories" % (self.__root, platform)) for base in HKEYS: d = read_values(base, key) if d: if self.__version >= 7: return self.__macros.sub(d[path]).split(";") else: return d[path].split(";") # MSVC 6 seems to create the registry entries we need only when # the GUI is run. if self.__version == 6: for base in HKEYS: if read_values(base, r"%s\6.0" % self.__root) is not None: self.warn("It seems you have Visual Studio 6 installed, " "but the expected registry settings are not present.\n" "You must at least run the Visual Studio GUI once " "so that these entries are created.") break return [] def set_path_env_var(self, name): """Set environment variable 'name' to an MSVC path type value. This is equivalent to a SET command prior to execution of spawned commands. 
""" if name == "lib": p = self.get_msvc_paths("library") else: p = self.get_msvc_paths(name) if p: os.environ[name] = ';'.join(p) if get_build_version() >= 8.0: log.debug("Importing new compiler from distutils.msvc9compiler") OldMSVCCompiler = MSVCCompiler from distutils.msvc9compiler import MSVCCompiler # get_build_architecture not really relevant now we support cross-compile from distutils.msvc9compiler import MacroExpander
0
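Not part of the dataset row above: a sketch of how this compiler class is normally reached, through the generic CCompiler factory rather than direct instantiation. This is meaningful only on Windows with Visual Studio installed; the source file names are hypothetical.

# Illustrative sketch only (Windows with MSVC assumed; files hypothetical).
from distutils.ccompiler import new_compiler

compiler = new_compiler(compiler='msvc')   # returns an MSVCCompiler
compiler.initialize()                      # locates cl.exe, link.exe, ...
objects = compiler.compile(['example.c'], output_dir='build')
compiler.link_shared_lib(objects, 'example', output_dir='build')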
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/errors.py
"""distutils.errors Provides exceptions used by the Distutils modules. Note that Distutils modules may raise standard exceptions; in particular, SystemExit is usually raised for errors that are obviously the end-user's fault (eg. bad command-line arguments). This module is safe to use in "from ... import *" mode; it only exports symbols whose names start with "Distutils" and end with "Error".""" class DistutilsError (Exception): """The root of all Distutils evil.""" pass class DistutilsModuleError (DistutilsError): """Unable to load an expected module, or to find an expected class within some module (in particular, command modules and classes).""" pass class DistutilsClassError (DistutilsError): """Some command class (or possibly distribution class, if anyone feels a need to subclass Distribution) is found not to be holding up its end of the bargain, ie. implementing some part of the "command "interface.""" pass class DistutilsGetoptError (DistutilsError): """The option table provided to 'fancy_getopt()' is bogus.""" pass class DistutilsArgError (DistutilsError): """Raised by fancy_getopt in response to getopt.error -- ie. an error in the command line usage.""" pass class DistutilsFileError (DistutilsError): """Any problems in the filesystem: expected file not found, etc. Typically this is for problems that we detect before OSError could be raised.""" pass class DistutilsOptionError (DistutilsError): """Syntactic/semantic errors in command options, such as use of mutually conflicting options, or inconsistent options, badly-spelled values, etc. No distinction is made between option values originating in the setup script, the command line, config files, or what-have-you -- but if we *know* something originated in the setup script, we'll raise DistutilsSetupError instead.""" pass class DistutilsSetupError (DistutilsError): """For errors that can be definitely blamed on the setup script, such as invalid keyword arguments to 'setup()'.""" pass class DistutilsPlatformError (DistutilsError): """We don't know how to do something on the current platform (but we do know how to do it on some platform) -- eg. trying to compile C files on a platform not supported by a CCompiler subclass.""" pass class DistutilsExecError (DistutilsError): """Any problems executing an external program (such as the C compiler, when compiling C files).""" pass class DistutilsInternalError (DistutilsError): """Internal inconsistencies or impossibilities (obviously, this should never be seen if the code is working!).""" pass class DistutilsTemplateError (DistutilsError): """Syntax error in a file list template.""" class DistutilsByteCompileError(DistutilsError): """Byte compile error.""" # Exception classes used by the CCompiler implementation classes class CCompilerError (Exception): """Some compile/link operation failed.""" class PreprocessError (CCompilerError): """Failure to preprocess one or more C/C++ files.""" class CompileError (CCompilerError): """Failure to compile one or more C/C++ source files.""" class LibError (CCompilerError): """Failure to create a static library from one or more C/C++ object files.""" class LinkError (CCompilerError): """Failure to link one or more C/C++ object files into an executable or shared library file.""" class UnknownFileError (CCompilerError): """Attempt to process an unknown file type."""
0
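Not part of the dataset row above: a sketch of the point of this hierarchy, namely that DistutilsError is the common base, so a single handler can catch every Distutils failure. The failing step is a hypothetical stand-in.

# Sketch of the exception hierarchy in use.
from distutils.errors import DistutilsError, DistutilsExecError

def run_step():
    # stand-in for any distutils operation; the failure is hypothetical
    raise DistutilsExecError("command 'cc' failed with exit code 1")

try:
    run_step()
except DistutilsError as exc:      # catches all Distutils* subclasses
    print("build failed:", exc)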
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/dep_util.py
"""distutils.dep_util Utility functions for simple, timestamp-based dependency of files and groups of files; also, function based entirely on such timestamp dependency analysis.""" import os from distutils.errors import DistutilsFileError def newer (source, target): """Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'. Raise DistutilsFileError if 'source' does not exist. """ if not os.path.exists(source): raise DistutilsFileError("file '%s' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return 1 from stat import ST_MTIME mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return mtime1 > mtime2 # newer () def newer_pairwise (sources, targets): """Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (sources, targets) where source is newer than target, according to the semantics of 'newer()'. """ if len(sources) != len(targets): raise ValueError("'sources' and 'targets' must be same length") # build a pair of lists (sources, targets) where source is newer n_sources = [] n_targets = [] for i in range(len(sources)): if newer(sources[i], targets[i]): n_sources.append(sources[i]) n_targets.append(targets[i]) return (n_sources, n_targets) # newer_pairwise () def newer_group (sources, target, missing='error'): """Return true if 'target' is out-of-date with respect to any file listed in 'sources'. In other words, if 'target' exists and is newer than every file in 'sources', return false; otherwise return true. 'missing' controls what we do when a source file is missing; the default ("error") is to blow up with an OSError from inside 'stat()'; if it is "ignore", we silently drop any missing source files; if it is "newer", any missing source files make us assume that 'target' is out-of-date (this is handy in "dry-run" mode: it'll make you pretend to carry out commands that wouldn't work because inputs are missing, but that doesn't matter because you're not actually going to run the commands). """ # If the target doesn't even exist, then it's definitely out-of-date. if not os.path.exists(target): return 1 # Otherwise we have to find out the hard way: if *any* source file # is more recent than 'target', then 'target' is out-of-date and # we can immediately return true. If we fall through to the end # of the loop, then 'target' is up-to-date and we return false. from stat import ST_MTIME target_mtime = os.stat(target)[ST_MTIME] for source in sources: if not os.path.exists(source): if missing == 'error': # blow up when we stat() the file pass elif missing == 'ignore': # missing source dropped from continue # target's dependency list elif missing == 'newer': # missing source means target is return 1 # out-of-date source_mtime = os.stat(source)[ST_MTIME] if source_mtime > target_mtime: return 1 else: return 0 # newer_group ()
0
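Not part of the dataset row above: a usage sketch of the dependency helpers. The file names are hypothetical.

# Usage sketch for dep_util (file names are hypothetical).
import pathlib
from distutils.dep_util import newer, newer_group

pathlib.Path("a.c").touch()
pathlib.Path("a.o").touch()                 # touched second, so it is newer

print(newer("a.c", "a.o"))                  # False: target is up to date
print(newer_group(["a.c"], "missing.o"))    # 1: target does not exist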
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/dir_util.py
"""distutils.dir_util Utility functions for manipulating directories and directory trees.""" import os import errno from distutils.errors import DistutilsFileError, DistutilsInternalError from distutils import log # cache for by mkpath() -- in addition to cheapening redundant calls, # eliminates redundant "creating /foo/bar/baz" messages in dry-run mode _path_created = {} # I don't use os.makedirs because a) it's new to Python 1.5.2, and # b) it blows up if the directory already exists (I want to silently # succeed in that case). def mkpath(name, mode=0o777, verbose=1, dry_run=0): """Create a directory and any missing ancestor directories. If the directory already exists (or if 'name' is the empty string, which means the current directory, which of course exists), then do nothing. Raise DistutilsFileError if unable to create some directory along the way (eg. some sub-path exists, but is a file rather than a directory). If 'verbose' is true, print a one-line summary of each mkdir to stdout. Return the list of directories actually created. """ global _path_created # Detect a common bug -- name is None if not isinstance(name, str): raise DistutilsInternalError( "mkpath: 'name' must be a string (got %r)" % (name,)) # XXX what's the better way to handle verbosity? print as we create # each directory in the path (the current behaviour), or only announce # the creation of the whole path? (quite easy to do the latter since # we're not using a recursive algorithm) name = os.path.normpath(name) created_dirs = [] if os.path.isdir(name) or name == '': return created_dirs if _path_created.get(os.path.abspath(name)): return created_dirs (head, tail) = os.path.split(name) tails = [tail] # stack of lone dirs to create while head and tail and not os.path.isdir(head): (head, tail) = os.path.split(head) tails.insert(0, tail) # push next higher dir onto stack # now 'head' contains the deepest directory that already exists # (that is, the child of 'head' in 'name' is the highest directory # that does *not* exist) for d in tails: #print "head = %s, d = %s: " % (head, d), head = os.path.join(head, d) abs_head = os.path.abspath(head) if _path_created.get(abs_head): continue if verbose >= 1: log.info("creating %s", head) if not dry_run: try: os.mkdir(head, mode) except OSError as exc: if not (exc.errno == errno.EEXIST and os.path.isdir(head)): raise DistutilsFileError( "could not create '%s': %s" % (head, exc.args[-1])) created_dirs.append(head) _path_created[abs_head] = 1 return created_dirs def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0): """Create all the empty directories under 'base_dir' needed to put 'files' there. 'base_dir' is just the name of a directory which doesn't necessarily exist yet; 'files' is a list of filenames to be interpreted relative to 'base_dir'. 'base_dir' + the directory portion of every file in 'files' will be created if it doesn't already exist. 'mode', 'verbose' and 'dry_run' flags are as for 'mkpath()'. """ # First get the list of directories to create need_dir = set() for file in files: need_dir.add(os.path.join(base_dir, os.path.dirname(file))) # Now create them for dir in sorted(need_dir): mkpath(dir, mode, verbose=verbose, dry_run=dry_run) def copy_tree(src, dst, preserve_mode=1, preserve_times=1, preserve_symlinks=0, update=0, verbose=1, dry_run=0): """Copy an entire directory tree 'src' to a new location 'dst'. Both 'src' and 'dst' must be directory names. If 'src' is not a directory, raise DistutilsFileError. 
If 'dst' does not exist, it is created with 'mkpath()'. The end result of the copy is that every file in 'src' is copied to 'dst', and directories under 'src' are recursively copied to 'dst'. Return the list of files that were copied or might have been copied, using their output name. The return value is unaffected by 'update' or 'dry_run': it is simply the list of all files under 'src', with the names changed to be under 'dst'. 'preserve_mode' and 'preserve_times' are the same as for 'copy_file'; note that they only apply to regular files, not to directories. If 'preserve_symlinks' is true, symlinks will be copied as symlinks (on platforms that support them!); otherwise (the default), the destination of the symlink will be copied. 'update' and 'verbose' are the same as for 'copy_file'. """ from distutils.file_util import copy_file if not dry_run and not os.path.isdir(src): raise DistutilsFileError( "cannot copy tree '%s': not a directory" % src) try: names = os.listdir(src) except OSError as e: if dry_run: names = [] else: raise DistutilsFileError( "error listing files in '%s': %s" % (src, e.strerror)) if not dry_run: mkpath(dst, verbose=verbose) outputs = [] for n in names: src_name = os.path.join(src, n) dst_name = os.path.join(dst, n) if n.startswith('.nfs'): # skip NFS rename files continue if preserve_symlinks and os.path.islink(src_name): link_dest = os.readlink(src_name) if verbose >= 1: log.info("linking %s -> %s", dst_name, link_dest) if not dry_run: os.symlink(link_dest, dst_name) outputs.append(dst_name) elif os.path.isdir(src_name): outputs.extend( copy_tree(src_name, dst_name, preserve_mode, preserve_times, preserve_symlinks, update, verbose=verbose, dry_run=dry_run)) else: copy_file(src_name, dst_name, preserve_mode, preserve_times, update, verbose=verbose, dry_run=dry_run) outputs.append(dst_name) return outputs def _build_cmdtuple(path, cmdtuples): """Helper for remove_tree().""" for f in os.listdir(path): real_f = os.path.join(path,f) if os.path.isdir(real_f) and not os.path.islink(real_f): _build_cmdtuple(real_f, cmdtuples) else: cmdtuples.append((os.remove, real_f)) cmdtuples.append((os.rmdir, path)) def remove_tree(directory, verbose=1, dry_run=0): """Recursively remove an entire directory tree. Any errors are ignored (apart from being reported to stdout if 'verbose' is true). """ global _path_created if verbose >= 1: log.info("removing '%s' (and everything under it)", directory) if dry_run: return cmdtuples = [] _build_cmdtuple(directory, cmdtuples) for cmd in cmdtuples: try: cmd[0](cmd[1]) # remove dir from cache if it's already there abspath = os.path.abspath(cmd[1]) if abspath in _path_created: del _path_created[abspath] except OSError as exc: log.warn("error removing %s: %s", directory, exc) def ensure_relative(path): """Take the full path 'path', and make it a relative path. This is useful to make 'path' the second argument to os.path.join(). """ drive, path = os.path.splitdrive(path) if path[0:1] == os.sep: path = drive + path[1:] return path
0
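Not part of the dataset row above: a usage sketch of the three main tree operations. The directory names are hypothetical.

# Usage sketch for dir_util (directory names are hypothetical).
from distutils.dir_util import mkpath, copy_tree, remove_tree

created = mkpath("build/pkg/data")          # creates every missing ancestor
print(created)                              # e.g. ['build', 'build/pkg', ...]

copied = copy_tree("build", "dist/build")   # recursive copy; returns names
remove_tree("dist/build")                   # recursive delete; errors logged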
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/sysconfig.py
"""Provide access to Python's configuration information. The specific configuration variables available depend heavily on the platform and configuration. The values may be retrieved using get_config_var(name), and the list of variables is available via get_config_vars().keys(). Additional convenience functions are also available. Written by: Fred L. Drake, Jr. Email: <fdrake@acm.org> """ import _imp import os import re import sys from .errors import DistutilsPlatformError IS_PYPY = '__pypy__' in sys.builtin_module_names # These are needed in a couple of spots, so just compute them once. PREFIX = os.path.normpath(sys.prefix) EXEC_PREFIX = os.path.normpath(sys.exec_prefix) BASE_PREFIX = os.path.normpath(sys.base_prefix) BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) # Path to the base directory of the project. On Windows the binary may # live in project/PCbuild/win32 or project/PCbuild/amd64. # set for cross builds if "_PYTHON_PROJECT_BASE" in os.environ: project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) else: if sys.executable: project_base = os.path.dirname(os.path.abspath(sys.executable)) else: # sys.executable can be empty if argv[0] has been changed and Python is # unable to retrieve the real program name project_base = os.getcwd() # python_build: (Boolean) if true, we're either building Python or # building an extension with an un-installed Python, so we use # different (hard-wired) directories. def _is_python_source_dir(d): for fn in ("Setup", "Setup.local"): if os.path.isfile(os.path.join(d, "Modules", fn)): return True return False _sys_home = getattr(sys, '_home', None) if os.name == 'nt': def _fix_pcbuild(d): if d and os.path.normcase(d).startswith( os.path.normcase(os.path.join(PREFIX, "PCbuild"))): return PREFIX return d project_base = _fix_pcbuild(project_base) _sys_home = _fix_pcbuild(_sys_home) def _python_build(): if _sys_home: return _is_python_source_dir(_sys_home) return _is_python_source_dir(project_base) python_build = _python_build() # Calculate the build qualifier flags if they are defined. Adding the flags # to the include and lib directories only makes sense for an installation, not # an in-source build. build_flags = '' try: if not python_build: build_flags = sys.abiflags except AttributeError: # It's not a configure-based build, so the sys module doesn't have # this attribute, which is fine. pass def get_python_version(): """Return a string containing the major and minor Python version, leaving off the patchlevel. Sample return values could be '1.5' or '2.2'. """ return '%d.%d' % sys.version_info[:2] def get_python_inc(plat_specific=0, prefix=None): """Return the directory containing installed Python header files. If 'plat_specific' is false (the default), this is the path to the non-platform-specific header files, i.e. Python.h and so on; otherwise, this is the path to platform-specific header files (namely pyconfig.h). If 'prefix' is supplied, use it instead of sys.base_prefix or sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX if IS_PYPY: return os.path.join(prefix, 'include') elif os.name == "posix": if python_build: # Assume the executable is in the build directory. The # pyconfig.h file should be in the same directory. Since # the build directory may not be the source directory, we # must use "srcdir" from the makefile to find the "Include" # directory. 
if plat_specific: return _sys_home or project_base else: incdir = os.path.join(get_config_var('srcdir'), 'Include') return os.path.normpath(incdir) python_dir = 'python' + get_python_version() + build_flags return os.path.join(prefix, "include", python_dir) elif os.name == "nt": if python_build: # Include both the include and PC dir to ensure we can find # pyconfig.h return (os.path.join(prefix, "include") + os.path.pathsep + os.path.join(prefix, "PC")) return os.path.join(prefix, "include") else: raise DistutilsPlatformError( "I don't know where Python installs its C header files " "on platform '%s'" % os.name) def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): """Return the directory containing the Python library (standard or site additions). If 'plat_specific' is true, return the directory containing platform-specific modules, i.e. any module from a non-pure-Python module distribution; otherwise, return the platform-shared library directory. If 'standard_lib' is true, return the directory containing standard Python library modules; otherwise, return the directory for site-specific modules. If 'prefix' is supplied, use it instead of sys.base_prefix or sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if IS_PYPY: # PyPy-specific schema if prefix is None: prefix = PREFIX if standard_lib: return os.path.join(prefix, "lib-python", sys.version[0]) return os.path.join(prefix, 'site-packages') if prefix is None: if standard_lib: prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX else: prefix = plat_specific and EXEC_PREFIX or PREFIX if os.name == "posix": if plat_specific or standard_lib: # Platform-specific modules (any module from a non-pure-Python # module distribution) or standard Python library modules. libdir = getattr(sys, "platlibdir", "lib") else: # Pure Python libdir = "lib" libpython = os.path.join(prefix, libdir, "python" + get_python_version()) if standard_lib: return libpython else: return os.path.join(libpython, "site-packages") elif os.name == "nt": if standard_lib: return os.path.join(prefix, "Lib") else: return os.path.join(prefix, "Lib", "site-packages") else: raise DistutilsPlatformError( "I don't know where Python installs its library " "on platform '%s'" % os.name) def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. Mainly needed on Unix, so we can plug in the information that varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": if sys.platform == "darwin": # Perform first-time customization of compiler-related # config vars on OS X now that we know we need a compiler. # This is primarily to support Pythons from binary # installers. The kind and paths to build tools on # the user system may vary significantly from the system # that Python itself was built on. Also the user OS # version and build tools may not support the same set # of CPU architectures for universal builds. global _config_vars # Use get_config_var() to ensure _config_vars is initialized. 
if not get_config_var('CUSTOMIZED_OSX_COMPILER'): import _osx_support _osx_support.customize_compiler(_config_vars) _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'CFLAGS', 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') if 'CC' in os.environ: newcc = os.environ['CC'] if (sys.platform == 'darwin' and 'LDSHARED' not in os.environ and ldshared.startswith(cc)): # On OS X, if CC is overridden, use that as the default # command for LDSHARED as well ldshared = newcc + ldshared[len(cc):] cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: ldshared = os.environ['LDSHARED'] if 'CPP' in os.environ: cpp = os.environ['CPP'] else: cpp = cc + " -E" # not always if 'LDFLAGS' in os.environ: ldshared = ldshared + ' ' + os.environ['LDFLAGS'] if 'CFLAGS' in os.environ: cflags = cflags + ' ' + os.environ['CFLAGS'] ldshared = ldshared + ' ' + os.environ['CFLAGS'] if 'CPPFLAGS' in os.environ: cpp = cpp + ' ' + os.environ['CPPFLAGS'] cflags = cflags + ' ' + os.environ['CPPFLAGS'] ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] if 'AR' in os.environ: ar = os.environ['AR'] if 'ARFLAGS' in os.environ: archiver = ar + ' ' + os.environ['ARFLAGS'] else: archiver = ar + ' ' + ar_flags cc_cmd = cc + ' ' + cflags compiler.set_executables( preprocessor=cpp, compiler=cc_cmd, compiler_so=cc_cmd + ' ' + ccshared, compiler_cxx=cxx, linker_so=ldshared, linker_exe=cc, archiver=archiver) compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): """Return full pathname of installed pyconfig.h file.""" if python_build: if os.name == "nt": inc_dir = os.path.join(_sys_home or project_base, "PC") else: inc_dir = _sys_home or project_base else: inc_dir = get_python_inc(plat_specific=1) return os.path.join(inc_dir, 'pyconfig.h') def get_makefile_filename(): """Return full pathname of installed Makefile from the Python build.""" if python_build: return os.path.join(_sys_home or project_base, "Makefile") lib_dir = get_python_lib(plat_specific=0, standard_lib=1) config_file = 'config-{}{}'.format(get_python_version(), build_flags) if hasattr(sys.implementation, '_multiarch'): config_file += '-%s' % sys.implementation._multiarch return os.path.join(lib_dir, config_file, 'Makefile') def parse_config_h(fp, g=None): """Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if g is None: g = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") # while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass g[n] = v else: m = undef_rx.match(line) if m: g[m.group(1)] = 0 return g # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") def parse_makefile(fn, g=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. 
""" from distutils.text_file import TextFile fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") if g is None: g = {} done = {} notdone = {} while True: line = fp.readline() if line is None: # eof break m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') # do variable interpolation here while notdone: for name in list(notdone): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if name.startswith('PY_') and name[3:] in renamed_variables: item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value del notdone[name] if name.startswith('PY_') \ and name[3:] in renamed_variables: name = name[3:] if name not in done: done[name] = value else: # bogus variable reference; just drop it since we can't deal del notdone[name] fp.close() # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary g.update(done) return g def expand_makefile_vars(s, vars): """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in 'string' according to 'vars' (a dictionary mapping variable names to values). Variables not present in 'vars' are silently expanded to the empty string. The variable values in 'vars' should not contain further variable expansions; if 'vars' is the output of 'parse_makefile()', you're fine. Returns a variable-expanded version of 's'. """ # This algorithm does multiple expansion, so if vars['foo'] contains # "${bar}", it will expand ${foo} to ${bar}, and then expand # ${bar}... and so forth. This is fine as long as 'vars' comes from # 'parse_makefile()', which takes care of such expansions eagerly, # according to make's variable expansion semantics. 
while True: m = _findvar1_rx.search(s) or _findvar2_rx.search(s) if m: (beg, end) = m.span() s = s[0:beg] + vars.get(m.group(1)) + s[end:] else: break return s _config_vars = None def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" # _sysconfigdata is generated at build time, see the sysconfig module name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME', '_sysconfigdata_{abi}_{platform}_{multiarch}'.format( abi=sys.abiflags, platform=sys.platform, multiarch=getattr(sys.implementation, '_multiarch', ''), )) try: _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) except ImportError: # Python 3.5 and pypy 7.3.1 _temp = __import__( '_sysconfigdata', globals(), locals(), ['build_time_vars'], 0) build_time_vars = _temp.build_time_vars global _config_vars _config_vars = {} _config_vars.update(build_time_vars) def _init_nt(): """Initialize the module as appropriate for NT""" g = {} # set basic install directories g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) # XXX hmmm.. a normal install puts include files here g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['EXT_SUFFIX'] = _imp.extension_suffixes()[0] g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) global _config_vars _config_vars = g def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. Generally this includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's installed Makefile; on Windows it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _config_vars if _config_vars is None: func = globals().get("_init_" + os.name) if func: func() else: _config_vars = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # Distutils. _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX if not IS_PYPY: # For backward compatibility, see issue19555 SO = _config_vars.get('EXT_SUFFIX') if SO is not None: _config_vars['SO'] = SO # Always convert srcdir to an absolute path srcdir = _config_vars.get('srcdir', project_base) if os.name == 'posix': if python_build: # If srcdir is a relative path (typically '.' or '..') # then it should be interpreted relative to the directory # containing Makefile. base = os.path.dirname(get_makefile_filename()) srcdir = os.path.join(base, srcdir) else: # srcdir is not meaningful since the installation is # spread about the filesystem. We choose the # directory containing the Makefile since we know it # exists. srcdir = os.path.dirname(get_makefile_filename()) _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. if python_build and os.name == "posix": base = project_base if (not os.path.isabs(_config_vars['srcdir']) and base != os.getcwd()): # srcdir is relative and we are not in the same directory # as the executable. Assume executable is in the build # directory and make srcdir absolute. 
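            # For example (illustrative): base '/home/me/cpython' and a
            # stored srcdir of '..' combine to '/home/me/cpython/..',
            # which normpath() below reduces to '/home/me'.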
srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) # OS X platforms require special customization to handle # multi-architecture, multi-os-version installers if sys.platform == 'darwin': import _osx_support _osx_support.customize_config_vars(_config_vars) if args: vals = [] for name in args: vals.append(_config_vars.get(name)) return vals else: return _config_vars def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ if name == 'SO': import warnings warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2) return get_config_vars().get(name)
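# A minimal usage sketch, added for illustration only (not part of the
# original module).  It just reads configuration values, so it is safe
# to run against any interpreter.
if __name__ == "__main__":
    for _name in ("prefix", "exec_prefix", "EXT_SUFFIX", "srcdir"):
        print("%s = %r" % (_name, get_config_var(_name)))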
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/dist.py
"""distutils.dist Provides the Distribution class, which represents the module distribution being built/installed/distributed. """ import sys import os import re from email import message_from_file try: import warnings except ImportError: warnings = None from distutils.errors import * from distutils.fancy_getopt import FancyGetopt, translate_longopt from distutils.util import check_environ, strtobool, rfc822_escape from distutils import log from distutils.debug import DEBUG # Regex to define acceptable Distutils command names. This is not *quite* # the same as a Python NAME -- I don't allow leading underscores. The fact # that they're very similar is no coincidence; the default naming scheme is # to look for a Python module named after the command. command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$') def _ensure_list(value, fieldname): if isinstance(value, str): # a string containing comma separated values is okay. It will # be converted to a list by Distribution.finalize_options(). pass elif not isinstance(value, list): # passing a tuple or an iterator perhaps, warn and convert typename = type(value).__name__ msg = "Warning: '{fieldname}' should be a list, got type '{typename}'" msg = msg.format(**locals()) log.log(log.WARN, msg) value = list(value) return value class Distribution: """The core of the Distutils. Most of the work hiding behind 'setup' is really done within a Distribution instance, which farms the work out to the Distutils commands specified on the command line. Setup scripts will almost never instantiate Distribution directly, unless the 'setup()' function is totally inadequate to their needs. However, it is conceivable that a setup script might wish to subclass Distribution for some specialized purpose, and then pass the subclass to 'setup()' as the 'distclass' keyword argument. If so, it is necessary to respect the expectations that 'setup' has of Distribution. See the code for 'setup()', in core.py, for details. """ # 'global_options' describes the command-line options that may be # supplied to the setup script prior to any actual commands. # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of # these global options. This list should be kept to a bare minimum, # since every global option is also valid as a command option -- and we # don't want to pollute the commands with too many options that they # have minimal control over. # The fourth entry for verbose means that it can be repeated. global_options = [ ('verbose', 'v', "run verbosely (default)", 1), ('quiet', 'q', "run quietly (turns verbosity off)"), ('dry-run', 'n', "don't actually do anything"), ('help', 'h', "show detailed help message"), ('no-user-cfg', None, 'ignore pydistutils.cfg in your home directory'), ] # 'common_usage' is a short (2-3 line) string describing the common # usage of the setup script. 
common_usage = """\ Common commands: (see '--help-commands' for more) setup.py build will build the package underneath 'build/' setup.py install will install the package """ # options that are not propagated to the commands display_options = [ ('help-commands', None, "list all available commands"), ('name', None, "print package name"), ('version', 'V', "print package version"), ('fullname', None, "print <package name>-<version>"), ('author', None, "print the author's name"), ('author-email', None, "print the author's email address"), ('maintainer', None, "print the maintainer's name"), ('maintainer-email', None, "print the maintainer's email address"), ('contact', None, "print the maintainer's name if known, else the author's"), ('contact-email', None, "print the maintainer's email address if known, else the author's"), ('url', None, "print the URL for this package"), ('license', None, "print the license of the package"), ('licence', None, "alias for --license"), ('description', None, "print the package description"), ('long-description', None, "print the long package description"), ('platforms', None, "print the list of platforms"), ('classifiers', None, "print the list of classifiers"), ('keywords', None, "print the list of keywords"), ('provides', None, "print the list of packages/modules provided"), ('requires', None, "print the list of packages/modules required"), ('obsoletes', None, "print the list of packages/modules made obsolete") ] display_option_names = [translate_longopt(x[0]) for x in display_options] # negative options are options that exclude other options negative_opt = {'quiet': 'verbose'} # -- Creation/initialization methods ------------------------------- def __init__(self, attrs=None): """Construct a new Distribution instance: initialize all the attributes of a Distribution, and then use 'attrs' (a dictionary mapping attribute names to values) to assign some of those attributes their "real" values. (Any attributes not mentioned in 'attrs' will be assigned to some null value: 0, None, an empty list or dictionary, etc.) Most importantly, initialize the 'command_obj' attribute to the empty dictionary; this will be filled in with real command objects by 'parse_command_line()'. """ # Default values for our command-line options self.verbose = 1 self.dry_run = 0 self.help = 0 for attr in self.display_option_names: setattr(self, attr, 0) # Store the distribution meta-data (name, version, author, and so # forth) in a separate object -- we're getting to have enough # information here (and enough command-line options) that it's # worth it. Also delegate 'get_XXX()' methods to the 'metadata' # object in a sneaky and underhanded (but efficient!) way. self.metadata = DistributionMetadata() for basename in self.metadata._METHOD_BASENAMES: method_name = "get_" + basename setattr(self, method_name, getattr(self.metadata, method_name)) # 'cmdclass' maps command names to class objects, so we # can 1) quickly figure out which class to instantiate when # we need to create a new command object, and 2) have a way # for the setup script to override command classes self.cmdclass = {} # 'command_packages' is a list of packages in which commands # are searched for. The factory for command 'foo' is expected # to be named 'foo' in the module 'foo' in one of the packages # named here. This list is searched from the left; an error # is raised if no named package provides the command being # searched for. (Always access using get_command_packages().) 
self.command_packages = None # 'script_name' and 'script_args' are usually set to sys.argv[0] # and sys.argv[1:], but they can be overridden when the caller is # not necessarily a setup script run from the command-line. self.script_name = None self.script_args = None # 'command_options' is where we store command options between # parsing them (from config files, the command-line, etc.) and when # they are actually needed -- ie. when the command in question is # instantiated. It is a dictionary of dictionaries of 2-tuples: # command_options = { command_name : { option : (source, value) } } self.command_options = {} # 'dist_files' is the list of (command, pyversion, file) that # have been created by any dist commands run so far. This is # filled regardless of whether the run is dry or not. pyversion # gives sysconfig.get_python_version() if the dist file is # specific to a Python version, 'any' if it is good for all # Python versions on the target platform, and '' for a source # file. pyversion should not be used to specify minimum or # maximum required Python versions; use the metainfo for that # instead. self.dist_files = [] # These options are really the business of various commands, rather # than of the Distribution itself. We provide aliases for them in # Distribution as a convenience to the developer. self.packages = None self.package_data = {} self.package_dir = None self.py_modules = None self.libraries = None self.headers = None self.ext_modules = None self.ext_package = None self.include_dirs = None self.extra_path = None self.scripts = None self.data_files = None self.password = '' # And now initialize bookkeeping stuff that can't be supplied by # the caller at all. 'command_obj' maps command names to # Command instances -- that's how we enforce that every command # class is a singleton. self.command_obj = {} # 'have_run' maps command names to boolean values; it keeps track # of whether we have actually run a particular command, to make it # cheap to "run" a command whenever we think we might need to -- if # it's already been done, no need for expensive filesystem # operations, we just check the 'have_run' dictionary and carry on. # It's only safe to query 'have_run' for a command class that has # been instantiated -- a false value will be inserted when the # command object is created, and replaced with a true value when # the command is successfully run. Thus it's probably best to use # '.get()' rather than a straight lookup. self.have_run = {} # Now we'll use the attrs dictionary (ultimately, keyword args from # the setup script) to possibly override any or all of these # distribution options. if attrs: # Pull out the set of command options and work on them # specifically. Note that this order guarantees that aliased # command options will override any supplied redundantly # through the general options dictionary. options = attrs.get('options') if options is not None: del attrs['options'] for (command, cmd_options) in options.items(): opt_dict = self.get_option_dict(command) for (opt, val) in cmd_options.items(): opt_dict[opt] = ("setup script", val) if 'licence' in attrs: attrs['license'] = attrs['licence'] del attrs['licence'] msg = "'licence' distribution option is deprecated; use 'license'" if warnings is not None: warnings.warn(msg) else: sys.stderr.write(msg + "\n") # Now work on the rest of the attributes. Any attribute that's # not already defined is invalid! 
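        # For example (illustrative): attrs={'version': '1.0'} is routed to
        # the metadata object below, while attrs={'packages': [...]} lands
        # directly on the Distribution instance.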
for (key, val) in attrs.items(): if hasattr(self.metadata, "set_" + key): getattr(self.metadata, "set_" + key)(val) elif hasattr(self.metadata, key): setattr(self.metadata, key, val) elif hasattr(self, key): setattr(self, key, val) else: msg = "Unknown distribution option: %s" % repr(key) warnings.warn(msg) # no-user-cfg is handled before other command line args # because other args override the config files, and this # one is needed before we can load the config files. # If attrs['script_args'] wasn't passed, assume false. # # This also make sure we just look at the global options self.want_user_cfg = True if self.script_args is not None: for arg in self.script_args: if not arg.startswith('-'): break if arg == '--no-user-cfg': self.want_user_cfg = False break self.finalize_options() def get_option_dict(self, command): """Get the option dictionary for a given command. If that command's option dictionary hasn't been created yet, then create it and return the new dictionary; otherwise, return the existing option dictionary. """ dict = self.command_options.get(command) if dict is None: dict = self.command_options[command] = {} return dict def dump_option_dicts(self, header=None, commands=None, indent=""): from pprint import pformat if commands is None: # dump all command option dicts commands = sorted(self.command_options.keys()) if header is not None: self.announce(indent + header) indent = indent + " " if not commands: self.announce(indent + "no commands known yet") return for cmd_name in commands: opt_dict = self.command_options.get(cmd_name) if opt_dict is None: self.announce(indent + "no option dict for '%s' command" % cmd_name) else: self.announce(indent + "option dict for '%s' command:" % cmd_name) out = pformat(opt_dict) for line in out.split('\n'): self.announce(indent + " " + line) # -- Config file finding/parsing methods --------------------------- def find_config_files(self): """Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions). There are three possible config files: distutils.cfg in the Distutils installation directory (ie. where the top-level Distutils __inst__.py file lives), a file in the user's home directory named .pydistutils.cfg on Unix and pydistutils.cfg on Windows/Mac; and setup.cfg in the current directory. The file in the user's home directory can be disabled with the --no-user-cfg option. 
""" files = [] check_environ() # Where to look for the system-wide Distutils config file sys_dir = os.path.dirname(sys.modules['distutils'].__file__) # Look for the system config file sys_file = os.path.join(sys_dir, "distutils.cfg") if os.path.isfile(sys_file): files.append(sys_file) # What to call the per-user config file if os.name == 'posix': user_filename = ".pydistutils.cfg" else: user_filename = "pydistutils.cfg" # And look for the user config file if self.want_user_cfg: user_file = os.path.join(os.path.expanduser('~'), user_filename) if os.path.isfile(user_file): files.append(user_file) # All platforms support local setup.cfg local_file = "setup.cfg" if os.path.isfile(local_file): files.append(local_file) if DEBUG: self.announce("using config files: %s" % ', '.join(files)) return files def parse_config_files(self, filenames=None): from configparser import ConfigParser # Ignore install directory options if we have a venv if sys.prefix != sys.base_prefix: ignore_options = [ 'install-base', 'install-platbase', 'install-lib', 'install-platlib', 'install-purelib', 'install-headers', 'install-scripts', 'install-data', 'prefix', 'exec-prefix', 'home', 'user', 'root'] else: ignore_options = [] ignore_options = frozenset(ignore_options) if filenames is None: filenames = self.find_config_files() if DEBUG: self.announce("Distribution.parse_config_files():") parser = ConfigParser() for filename in filenames: if DEBUG: self.announce(" reading %s" % filename) parser.read(filename) for section in parser.sections(): options = parser.options(section) opt_dict = self.get_option_dict(section) for opt in options: if opt != '__name__' and opt not in ignore_options: val = parser.get(section,opt) opt = opt.replace('-', '_') opt_dict[opt] = (filename, val) # Make the ConfigParser forget everything (so we retain # the original filenames that options come from) parser.__init__() # If there was a "global" section in the config file, use it # to set Distribution options. if 'global' in self.command_options: for (opt, (src, val)) in self.command_options['global'].items(): alias = self.negative_opt.get(opt) try: if alias: setattr(self, alias, not strtobool(val)) elif opt in ('verbose', 'dry_run'): # ugh! setattr(self, opt, strtobool(val)) else: setattr(self, opt, val) except ValueError as msg: raise DistutilsOptionError(msg) # -- Command-line parsing methods ---------------------------------- def parse_command_line(self): """Parse the setup script's command line, taken from the 'script_args' instance attribute (which defaults to 'sys.argv[1:]' -- see 'setup()' in core.py). This list is first processed for "global options" -- options that set attributes of the Distribution instance. Then, it is alternately scanned for Distutils commands and options for that command. Each new command terminates the options for the previous command. The allowed options for a command are determined by the 'user_options' attribute of the command class -- thus, we have to be able to load command classes in order to parse the command line. Any error in that 'options' attribute raises DistutilsGetoptError; any error on the command-line raises DistutilsArgError. If no Distutils commands were found on the command line, raises DistutilsArgError. Return true if command-line was successfully parsed and we should carry on with executing commands; false if no errors but we shouldn't execute commands (currently, this only happens if user asks for help). 
""" # # We now have enough information to show the Macintosh dialog # that allows the user to interactively specify the "command line". # toplevel_options = self._get_toplevel_options() # We have to parse the command line a bit at a time -- global # options, then the first command, then its options, and so on -- # because each command will be handled by a different class, and # the options that are valid for a particular class aren't known # until we have loaded the command class, which doesn't happen # until we know what the command is. self.commands = [] parser = FancyGetopt(toplevel_options + self.display_options) parser.set_negative_aliases(self.negative_opt) parser.set_aliases({'licence': 'license'}) args = parser.getopt(args=self.script_args, object=self) option_order = parser.get_option_order() log.set_verbosity(self.verbose) # for display options we return immediately if self.handle_display_options(option_order): return while args: args = self._parse_command_opts(parser, args) if args is None: # user asked for help (and got it) return # Handle the cases of --help as a "global" option, ie. # "setup.py --help" and "setup.py --help command ...". For the # former, we show global options (--verbose, --dry-run, etc.) # and display-only options (--name, --version, etc.); for the # latter, we omit the display-only options and show help for # each command listed on the command line. if self.help: self._show_help(parser, display_options=len(self.commands) == 0, commands=self.commands) return # Oops, no commands found -- an end-user error if not self.commands: raise DistutilsArgError("no commands supplied") # All is well: return true return True def _get_toplevel_options(self): """Return the non-display options recognized at the top level. This includes options that are recognized *only* at the top level as well as options recognized for commands. """ return self.global_options + [ ("command-packages=", None, "list of packages that provide distutils commands"), ] def _parse_command_opts(self, parser, args): """Parse the command-line options for a single command. 'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options we are about to parse). Returns a new version of 'args' with the next command at the front of the list; will be the empty list if there are no more commands on the command line. Returns None if the user asked for help on this command. """ # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit("invalid command name '%s'" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic "command" interface is implemented. if not issubclass(cmd_class, Command): raise DistutilsClassError( "command class %s must subclass Command" % cmd_class) # Also make sure that the command object provides a list of its # known options. 
if not (hasattr(cmd_class, 'user_options') and isinstance(cmd_class.user_options, list)): msg = ("command class %s must provide " "'user_options' attribute (a list of tuples)") raise DistutilsClassError(msg % cmd_class) # If the command class has a list of negative alias options, # merge it in with the global negative aliases. negative_opt = self.negative_opt if hasattr(cmd_class, 'negative_opt'): negative_opt = negative_opt.copy() negative_opt.update(cmd_class.negative_opt) # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. parser.set_option_table(self.global_options + cmd_class.user_options + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): help_option_found=1 if callable(func): func() else: raise DistutilsClassError( "invalid help function %r for help option '%s': " "must be a callable object (function, etc.)" % (func, help_option)) if help_option_found: return # Put the options from the command-line into their official # holding pen, the 'command_options' dictionary. opt_dict = self.get_option_dict(command) for (name, value) in vars(opts).items(): opt_dict[name] = ("command line", value) return args def finalize_options(self): """Set final values for all the options on the Distribution instance, analogous to the .finalize_options() method of Command objects. """ for attr in ('keywords', 'platforms'): value = getattr(self.metadata, attr) if value is None: continue if isinstance(value, str): value = [elm.strip() for elm in value.split(',')] setattr(self.metadata, attr, value) def _show_help(self, parser, global_options=1, display_options=1, commands=[]): """Show help for the setup script command-line in the form of several lists of command-line options. 'parser' should be a FancyGetopt instance; do not expect it to be returned in the same state, as its option table will be reset to make it generate the correct help text. If 'global_options' is true, lists the global options: --verbose, --dry-run, etc. If 'display_options' is true, lists the "display-only" options: --name, --version, etc. Finally, lists per-command help for every command name or command class in 'commands'. 
""" # late import because of mutual dependence between these modules from distutils.core import gen_usage from distutils.cmd import Command if global_options: if display_options: options = self._get_toplevel_options() else: options = self.global_options parser.set_option_table(options) parser.print_help(self.common_usage + "\nGlobal options:") print('') if display_options: parser.set_option_table(self.display_options) parser.print_help( "Information display options (just display " + "information, ignore any commands)") print('') for command in self.commands: if isinstance(command, type) and issubclass(command, Command): klass = command else: klass = self.get_command_class(command) if (hasattr(klass, 'help_options') and isinstance(klass.help_options, list)): parser.set_option_table(klass.user_options + fix_help_options(klass.help_options)) else: parser.set_option_table(klass.user_options) parser.print_help("Options for '%s' command:" % klass.__name__) print('') print(gen_usage(self.script_name)) def handle_display_options(self, option_order): """If there were any non-global "display-only" options (--help-commands or the metadata display options) on the command line, display the requested info and return true; else return false. """ from distutils.core import gen_usage # User just wants a list of commands -- we'll print it out and stop # processing now (ie. if they ran "setup --help-commands foo bar", # we ignore "foo bar"). if self.help_commands: self.print_commands() print('') print(gen_usage(self.script_name)) return 1 # If user supplied any of the "display metadata" options, then # display that metadata in the order in which the user supplied the # metadata options. any_display_options = 0 is_display_option = {} for option in self.display_options: is_display_option[option[0]] = 1 for (opt, val) in option_order: if val and is_display_option.get(opt): opt = translate_longopt(opt) value = getattr(self.metadata, "get_"+opt)() if opt in ['keywords', 'platforms']: print(','.join(value)) elif opt in ('classifiers', 'provides', 'requires', 'obsoletes'): print('\n'.join(value)) else: print(value) any_display_options = 1 return any_display_options def print_command_list(self, commands, header, max_length): """Print a subset of the list of all commands -- used by 'print_commands()'. """ print(header + ":") for cmd in commands: klass = self.cmdclass.get(cmd) if not klass: klass = self.get_command_class(cmd) try: description = klass.description except AttributeError: description = "(no description available)" print(" %-*s %s" % (max_length, cmd, description)) def print_commands(self): """Print out a help message listing all available commands with a description of each. The list is divided into "standard commands" (listed in distutils.command.__all__) and "extra commands" (mentioned in self.cmdclass, but not a standard command). The descriptions come from the command class attribute 'description'. """ import distutils.command std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: is_std[cmd] = 1 extra_commands = [] for cmd in self.cmdclass.keys(): if not is_std.get(cmd): extra_commands.append(cmd) max_length = 0 for cmd in (std_commands + extra_commands): if len(cmd) > max_length: max_length = len(cmd) self.print_command_list(std_commands, "Standard commands", max_length) if extra_commands: print() self.print_command_list(extra_commands, "Extra commands", max_length) def get_command_list(self): """Get a list of (command, description) tuples. 
The list is divided into "standard commands" (listed in distutils.command.__all__) and "extra commands" (mentioned in self.cmdclass, but not a standard command). The descriptions come from the command class attribute 'description'. """ # Currently this is only used on Mac OS, for the Mac-only GUI # Distutils interface (by Jack Jansen) import distutils.command std_commands = distutils.command.__all__ is_std = {} for cmd in std_commands: is_std[cmd] = 1 extra_commands = [] for cmd in self.cmdclass.keys(): if not is_std.get(cmd): extra_commands.append(cmd) rv = [] for cmd in (std_commands + extra_commands): klass = self.cmdclass.get(cmd) if not klass: klass = self.get_command_class(cmd) try: description = klass.description except AttributeError: description = "(no description available)" rv.append((cmd, description)) return rv # -- Command class/object methods ---------------------------------- def get_command_packages(self): """Return a list of packages from which commands are loaded.""" pkgs = self.command_packages if not isinstance(pkgs, list): if pkgs is None: pkgs = '' pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != ''] if "distutils.command" not in pkgs: pkgs.insert(0, "distutils.command") self.command_packages = pkgs return pkgs def get_command_class(self, command): """Return the class that implements the Distutils command named by 'command'. First we check the 'cmdclass' dictionary; if the command is mentioned there, we fetch the class object from the dictionary and return it. Otherwise we load the command module ("distutils.command." + command) and fetch the command class from the module. The loaded class is also stored in 'cmdclass' to speed future calls to 'get_command_class()'. Raises DistutilsModuleError if the expected module could not be found, or if that module does not define the expected class. """ klass = self.cmdclass.get(command) if klass: return klass for pkgname in self.get_command_packages(): module_name = "%s.%s" % (pkgname, command) klass_name = command try: __import__(module_name) module = sys.modules[module_name] except ImportError: continue try: klass = getattr(module, klass_name) except AttributeError: raise DistutilsModuleError( "invalid command '%s' (no class '%s' in module '%s')" % (command, klass_name, module_name)) self.cmdclass[command] = klass return klass raise DistutilsModuleError("invalid command '%s'" % command) def get_command_obj(self, command, create=1): """Return the command object for 'command'. Normally this object is cached on a previous call to 'get_command_obj()'; if no command object for 'command' is in the cache, then we either create and return it (if 'create' is true) or return None. """ cmd_obj = self.command_obj.get(command) if not cmd_obj and create: if DEBUG: self.announce("Distribution.get_command_obj(): " "creating '%s' command object" % command) klass = self.get_command_class(command) cmd_obj = self.command_obj[command] = klass(self) self.have_run[command] = 0 # Set any options that were supplied in config files # or on the command line. (NB. support for error # reporting is lame here: any errors aren't reported # until 'finalize_options()' is called, which means # we won't report the source of the error.) options = self.command_options.get(command) if options: self._set_command_options(cmd_obj, options) return cmd_obj def _set_command_options(self, command_obj, option_dict=None): """Set the options for 'command_obj' from 'option_dict'. 
Basically this means copying elements of a dictionary ('option_dict') to attributes of an instance ('command'). 'command_obj' must be a Command instance. If 'option_dict' is not supplied, uses the standard option dictionary for this command (from 'self.command_options'). """ command_name = command_obj.get_command_name() if option_dict is None: option_dict = self.get_option_dict(command_name) if DEBUG: self.announce(" setting options for '%s' command:" % command_name) for (option, (source, value)) in option_dict.items(): if DEBUG: self.announce(" %s = %s (from %s)" % (option, value, source)) try: bool_opts = [translate_longopt(o) for o in command_obj.boolean_options] except AttributeError: bool_opts = [] try: neg_opt = command_obj.negative_opt except AttributeError: neg_opt = {} try: is_string = isinstance(value, str) if option in neg_opt and is_string: setattr(command_obj, neg_opt[option], not strtobool(value)) elif option in bool_opts and is_string: setattr(command_obj, option, strtobool(value)) elif hasattr(command_obj, option): setattr(command_obj, option, value) else: raise DistutilsOptionError( "error in %s: command '%s' has no such option '%s'" % (source, command_name, option)) except ValueError as msg: raise DistutilsOptionError(msg) def reinitialize_command(self, command, reinit_subcommands=0): """Reinitializes a command to the state it was in when first returned by 'get_command_obj()': ie., initialized but not yet finalized. This provides the opportunity to sneak option values in programmatically, overriding or supplementing user-supplied values from the config files and command line. You'll have to re-finalize the command object (by calling 'finalize_options()' or 'ensure_finalized()') before using it for real. 'command' should be a command name (string) or command object. If 'reinit_subcommands' is true, also reinitializes the command's sub-commands, as declared by the 'sub_commands' class attribute (if it has one). See the "install" command for an example. Only reinitializes the sub-commands that actually matter, ie. those whose test predicates return true. Returns the reinitialized command object. """ from distutils.cmd import Command if not isinstance(command, Command): command_name = command command = self.get_command_obj(command_name) else: command_name = command.get_command_name() if not command.finalized: return command command.initialize_options() command.finalized = 0 self.have_run[command_name] = 0 self._set_command_options(command) if reinit_subcommands: for sub in command.get_sub_commands(): self.reinitialize_command(sub, reinit_subcommands) return command # -- Methods that operate on the Distribution ---------------------- def announce(self, msg, level=log.INFO): log.log(level, msg) def run_commands(self): """Run each command that was seen on the setup script command line. Uses the list of commands found and cache of command objects created by 'get_command_obj()'. """ for cmd in self.commands: self.run_command(cmd) # -- Methods that operate on its Commands -------------------------- def run_command(self, command): """Do whatever it takes to run a command (including nothing at all, if the command has already been run). Specifically: if we have already created and run the command named by 'command', return silently without doing anything. If the command named by 'command' doesn't even have a command object yet, create one. Then invoke 'run()' on that command object (or an existing one). """ # Already been here, done that? then return silently. 
if self.have_run.get(command): return log.info("running %s", command) cmd_obj = self.get_command_obj(command) cmd_obj.ensure_finalized() cmd_obj.run() self.have_run[command] = 1 # -- Distribution query methods ------------------------------------ def has_pure_modules(self): return len(self.packages or self.py_modules or []) > 0 def has_ext_modules(self): return self.ext_modules and len(self.ext_modules) > 0 def has_c_libraries(self): return self.libraries and len(self.libraries) > 0 def has_modules(self): return self.has_pure_modules() or self.has_ext_modules() def has_headers(self): return self.headers and len(self.headers) > 0 def has_scripts(self): return self.scripts and len(self.scripts) > 0 def has_data_files(self): return self.data_files and len(self.data_files) > 0 def is_pure(self): return (self.has_pure_modules() and not self.has_ext_modules() and not self.has_c_libraries()) # -- Metadata query methods ---------------------------------------- # If you're looking for 'get_name()', 'get_version()', and so forth, # they are defined in a sneaky way: the constructor binds self.get_XXX # to self.metadata.get_XXX. The actual code is in the # DistributionMetadata class, below. class DistributionMetadata: """Dummy class to hold the distribution meta-data: name, version, author, and so forth. """ _METHOD_BASENAMES = ("name", "version", "author", "author_email", "maintainer", "maintainer_email", "url", "license", "description", "long_description", "keywords", "platforms", "fullname", "contact", "contact_email", "classifiers", "download_url", # PEP 314 "provides", "requires", "obsoletes", ) def __init__(self, path=None): if path is not None: self.read_pkg_file(open(path)) else: self.name = None self.version = None self.author = None self.author_email = None self.maintainer = None self.maintainer_email = None self.url = None self.license = None self.description = None self.long_description = None self.keywords = None self.platforms = None self.classifiers = None self.download_url = None # PEP 314 self.provides = None self.requires = None self.obsoletes = None def read_pkg_file(self, file): """Reads the metadata values from a file object.""" msg = message_from_file(file) def _read_field(name): value = msg[name] if value == 'UNKNOWN': return None return value def _read_list(name): values = msg.get_all(name, None) if values == []: return None return values metadata_version = msg['metadata-version'] self.name = _read_field('name') self.version = _read_field('version') self.description = _read_field('summary') # we are filling author only. self.author = _read_field('author') self.maintainer = None self.author_email = _read_field('author-email') self.maintainer_email = None self.url = _read_field('home-page') self.license = _read_field('license') if 'download-url' in msg: self.download_url = _read_field('download-url') else: self.download_url = None self.long_description = _read_field('description') self.description = _read_field('summary') if 'keywords' in msg: self.keywords = _read_field('keywords').split(',') self.platforms = _read_list('platform') self.classifiers = _read_list('classifier') # PEP 314 - these fields only exist in 1.1 if metadata_version == '1.1': self.requires = _read_list('requires') self.provides = _read_list('provides') self.obsoletes = _read_list('obsoletes') else: self.requires = None self.provides = None self.obsoletes = None def write_pkg_info(self, base_dir): """Write the PKG-INFO file into the release tree. 
""" with open(os.path.join(base_dir, 'PKG-INFO'), 'w', encoding='UTF-8') as pkg_info: self.write_pkg_file(pkg_info) def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. """ version = '1.0' if (self.provides or self.requires or self.obsoletes or self.classifiers or self.download_url): version = '1.1' file.write('Metadata-Version: %s\n' % version) file.write('Name: %s\n' % self.get_name()) file.write('Version: %s\n' % self.get_version()) file.write('Summary: %s\n' % self.get_description()) file.write('Home-page: %s\n' % self.get_url()) file.write('Author: %s\n' % self.get_contact()) file.write('Author-email: %s\n' % self.get_contact_email()) file.write('License: %s\n' % self.get_license()) if self.download_url: file.write('Download-URL: %s\n' % self.download_url) long_desc = rfc822_escape(self.get_long_description()) file.write('Description: %s\n' % long_desc) keywords = ','.join(self.get_keywords()) if keywords: file.write('Keywords: %s\n' % keywords) self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) # PEP 314 self._write_list(file, 'Requires', self.get_requires()) self._write_list(file, 'Provides', self.get_provides()) self._write_list(file, 'Obsoletes', self.get_obsoletes()) def _write_list(self, file, name, values): for value in values: file.write('%s: %s\n' % (name, value)) # -- Metadata query methods ---------------------------------------- def get_name(self): return self.name or "UNKNOWN" def get_version(self): return self.version or "0.0.0" def get_fullname(self): return "%s-%s" % (self.get_name(), self.get_version()) def get_author(self): return self.author or "UNKNOWN" def get_author_email(self): return self.author_email or "UNKNOWN" def get_maintainer(self): return self.maintainer or "UNKNOWN" def get_maintainer_email(self): return self.maintainer_email or "UNKNOWN" def get_contact(self): return self.maintainer or self.author or "UNKNOWN" def get_contact_email(self): return self.maintainer_email or self.author_email or "UNKNOWN" def get_url(self): return self.url or "UNKNOWN" def get_license(self): return self.license or "UNKNOWN" get_licence = get_license def get_description(self): return self.description or "UNKNOWN" def get_long_description(self): return self.long_description or "UNKNOWN" def get_keywords(self): return self.keywords or [] def set_keywords(self, value): self.keywords = _ensure_list(value, 'keywords') def get_platforms(self): return self.platforms or ["UNKNOWN"] def set_platforms(self, value): self.platforms = _ensure_list(value, 'platforms') def get_classifiers(self): return self.classifiers or [] def set_classifiers(self, value): self.classifiers = _ensure_list(value, 'classifiers') def get_download_url(self): return self.download_url or "UNKNOWN" # PEP 314 def get_requires(self): return self.requires or [] def set_requires(self, value): import distutils.versionpredicate for v in value: distutils.versionpredicate.VersionPredicate(v) self.requires = list(value) def get_provides(self): return self.provides or [] def set_provides(self, value): value = [v.strip() for v in value] for v in value: import distutils.versionpredicate distutils.versionpredicate.split_provision(v) self.provides = value def get_obsoletes(self): return self.obsoletes or [] def set_obsoletes(self, value): import distutils.versionpredicate for v in value: distutils.versionpredicate.VersionPredicate(v) self.obsoletes = list(value) def fix_help_options(options): """Convert a 4-tuple 'help_options' 
list as found in various command classes to the 3-tuple form required by FancyGetopt. """ new_options = [] for help_tuple in options: new_options.append(help_tuple[0:3]) return new_options
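# A minimal usage sketch, added for illustration only (not part of the
# original module); the 'demo' metadata values are made up.
if __name__ == "__main__":
    _dist = Distribution({'name': 'demo', 'version': '1.0',
                          'script_name': 'setup.py', 'script_args': []})
    print(_dist.get_fullname())          # -> demo-1.0
    print(_dist.get_command_packages())  # -> ['distutils.command']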
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/bcppcompiler.py
"""distutils.bcppcompiler Contains BorlandCCompiler, an implementation of the abstract CCompiler class for the Borland C++ compiler. """ # This implementation by Lyle Johnson, based on the original msvccompiler.py # module and using the directions originally published by Gordon Williams. # XXX looks like there's a LOT of overlap between these two classes: # someone should sit down and factor out the common code as # WindowsCCompiler! --GPW import os from distutils.errors import \ DistutilsExecError, \ CompileError, LibError, LinkError, UnknownFileError from distutils.ccompiler import \ CCompiler, gen_preprocess_options from distutils.file_util import write_file from distutils.dep_util import newer from distutils import log class BCPPCompiler(CCompiler) : """Concrete class that implements an interface to the Borland C/C++ compiler, as defined by the CCompiler abstract class. """ compiler_type = 'bcpp' # Just set this so CCompiler's constructor doesn't barf. We currently # don't use the 'set_executables()' bureaucracy provided by CCompiler, # as it really isn't necessary for this sort of single-compiler class. # Would be nice to have a consistent interface with UnixCCompiler, # though, so it's worth thinking about. executables = {} # Private class data (need to distinguish C from C++ source for compiler) _c_extensions = ['.c'] _cpp_extensions = ['.cc', '.cpp', '.cxx'] # Needed for the filename generation methods provided by the # base class, CCompiler. src_extensions = _c_extensions + _cpp_extensions obj_extension = '.obj' static_lib_extension = '.lib' shared_lib_extension = '.dll' static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' def __init__ (self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) # These executables are assumed to all be in the path. # Borland doesn't seem to use any special registry settings to # indicate their installation locations. self.cc = "bcc32.exe" self.linker = "ilink32.exe" self.lib = "tlib.exe" self.preprocess_options = None self.compile_options = ['/tWM', '/O2', '/q', '/g0'] self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0'] self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x'] self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x'] self.ldflags_static = [] self.ldflags_exe = ['/Gn', '/q', '/x'] self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r'] # -- Worker methods ------------------------------------------------ def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): macros, objects, extra_postargs, pp_opts, build = \ self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) compile_opts = extra_preargs or [] compile_opts.append ('-c') if debug: compile_opts.extend (self.compile_options_debug) else: compile_opts.extend (self.compile_options) for obj in objects: try: src, ext = build[obj] except KeyError: continue # XXX why do the normpath here? src = os.path.normpath(src) obj = os.path.normpath(obj) # XXX _setup_compile() did a mkpath() too but before the normpath. # Is it possible to skip the normpath? self.mkpath(os.path.dirname(obj)) if ext == '.res': # This is already a binary file -- skip it. continue # the 'for' loop if ext == '.rc': # This needs to be compiled to a .res file -- do it now. try: self.spawn (["brcc32", "-fo", obj, src]) except DistutilsExecError as msg: raise CompileError(msg) continue # the 'for' loop # The next two are both for the real compiler. 
if ext in self._c_extensions: input_opt = "" elif ext in self._cpp_extensions: input_opt = "-P" else: # Unknown file type -- no extra options. The compiler # will probably fail, but let it just in case this is a # file the compiler recognizes even if we don't. input_opt = "" output_opt = "-o" + obj # Compiler command line syntax is: "bcc32 [options] file(s)". # Note that the source file names must appear at the end of # the command line. try: self.spawn ([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs + [src]) except DistutilsExecError as msg: raise CompileError(msg) return objects # compile () def create_static_lib (self, objects, output_libname, output_dir=None, debug=0, target_lang=None): (objects, output_dir) = self._fix_object_args (objects, output_dir) output_filename = \ self.library_filename (output_libname, output_dir=output_dir) if self._need_link (objects, output_filename): lib_args = [output_filename, '/u'] + objects if debug: pass # XXX what goes here? try: self.spawn ([self.lib] + lib_args) except DistutilsExecError as msg: raise LibError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) # create_static_lib () def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # XXX this ignores 'build_temp'! should follow the lead of # msvccompiler.py (objects, output_dir) = self._fix_object_args (objects, output_dir) (libraries, library_dirs, runtime_library_dirs) = \ self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) if runtime_library_dirs: log.warn("I don't know what to do with 'runtime_library_dirs': %s", str(runtime_library_dirs)) if output_dir is not None: output_filename = os.path.join (output_dir, output_filename) if self._need_link (objects, output_filename): # Figure out linker args based on type of target. if target_desc == CCompiler.EXECUTABLE: startup_obj = 'c0w32' if debug: ld_args = self.ldflags_exe_debug[:] else: ld_args = self.ldflags_exe[:] else: startup_obj = 'c0d32' if debug: ld_args = self.ldflags_shared_debug[:] else: ld_args = self.ldflags_shared[:] # Create a temporary exports file for use by the linker if export_symbols is None: def_file = '' else: head, tail = os.path.split (output_filename) modname, ext = os.path.splitext (tail) temp_dir = os.path.dirname(objects[0]) # preserve tree structure def_file = os.path.join (temp_dir, '%s.def' % modname) contents = ['EXPORTS'] for sym in (export_symbols or []): contents.append(' %s=_%s' % (sym, sym)) self.execute(write_file, (def_file, contents), "writing %s" % def_file) # Borland C++ has problems with '/' in paths objects2 = map(os.path.normpath, objects) # split objects in .obj and .res files # Borland C++ needs them at different positions in the command line objects = [startup_obj] resources = [] for file in objects2: (base, ext) = os.path.splitext(os.path.normcase(file)) if ext == '.res': resources.append(file) else: objects.append(file) for l in library_dirs: ld_args.append("/L%s" % os.path.normpath(l)) ld_args.append("/L.") # we sometimes use relative paths # list of object files ld_args.extend(objects) # XXX the command-line syntax for Borland C++ is a bit wonky; # certain filenames are jammed together in one big string, but # comma-delimited. 
This doesn't mesh too well with the # Unix-centric attitude (with a DOS/Windows quoting hack) of # 'spawn()', so constructing the argument list is a bit # awkward. Note that doing the obvious thing and jamming all # the filenames and commas into one argument would be wrong, # because 'spawn()' would quote any filenames with spaces in # them. Arghghh!. Apparently it works fine as coded... # name of dll/exe file ld_args.extend([',',output_filename]) # no map file and start libraries ld_args.append(',,') for lib in libraries: # see if we find it and if there is a bcpp specific lib # (xxx_bcpp.lib) libfile = self.find_library_file(library_dirs, lib, debug) if libfile is None: ld_args.append(lib) # probably a BCPP internal library -- don't warn else: # full name which prefers bcpp_xxx.lib over xxx.lib ld_args.append(libfile) # some default libraries ld_args.append ('import32') ld_args.append ('cw32mt') # def file for export symbols ld_args.extend([',',def_file]) # add resource files ld_args.append(',') ld_args.extend(resources) if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath (os.path.dirname (output_filename)) try: self.spawn ([self.linker] + ld_args) except DistutilsExecError as msg: raise LinkError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) # link () # -- Miscellaneous methods ----------------------------------------- def find_library_file (self, dirs, lib, debug=0): # List of effective library names to try, in order of preference: # xxx_bcpp.lib is better than xxx.lib # and xxx_d.lib is better than xxx.lib if debug is set # # The "_bcpp" suffix is to handle a Python installation for people # with multiple compilers (primarily Distutils hackers, I suspect # ;-). The idea is they'd have one static library for each # compiler they care about, since (almost?) every Windows compiler # seems to have a different format for static libraries. 
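        # For example (illustrative), find_library_file(dirs, 'zlib', debug=1)
        # probes zlib_d_bcpp.lib, zlib_bcpp.lib, zlib_d.lib and zlib.lib in
        # each directory, in that order.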
if debug: dlib = (lib + "_d") try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib) else: try_names = (lib + "_bcpp", lib) for dir in dirs: for name in try_names: libfile = os.path.join(dir, self.library_filename(name)) if os.path.exists(libfile): return libfile else: # Oops, didn't find it in *any* of 'dirs' return None # overwrite the one from CCompiler to support rc and res-files def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) if ext not in (self.src_extensions + ['.rc','.res']): raise UnknownFileError("unknown file type '%s' (from '%s')" % \ (ext, src_name)) if strip_dir: base = os.path.basename (base) if ext == '.res': # these can go unchanged obj_names.append (os.path.join (output_dir, base + ext)) elif ext == '.rc': # these need to be compiled to .res-files obj_names.append (os.path.join (output_dir, base + '.res')) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def preprocess (self, source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None): (_, macros, include_dirs) = \ self._fix_compile_args(None, macros, include_dirs) pp_opts = gen_preprocess_options(macros, include_dirs) pp_args = ['cpp32.exe'] + pp_opts if output_file is not None: pp_args.append('-o' + output_file) if extra_preargs: pp_args[:0] = extra_preargs if extra_postargs: pp_args.extend(extra_postargs) pp_args.append(source) # We need to preprocess: either we're being forced to, or the # source file is newer than the target (or the target doesn't # exist). if self.force or output_file is None or newer(source, output_file): if output_file: self.mkpath(os.path.dirname(output_file)) try: self.spawn(pp_args) except DistutilsExecError as msg: print(msg) raise CompileError(msg) # preprocess()
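# A minimal usage sketch, added for illustration only (not part of the
# original module).  object_filenames() is pure name mangling, so this
# runs without a Borland toolchain installed.
if __name__ == "__main__":
    _bcpp = BCPPCompiler()
    print(_bcpp.object_filenames(["foo.c", "app.rc", "icons.res"]))
    # -> ['foo.obj', 'app.res', 'icons.res']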
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/build.py
"""distutils.command.build Implements the Distutils 'build' command.""" import sys, os from distutils.core import Command from distutils.errors import DistutilsOptionError from distutils.util import get_platform def show_compilers(): from distutils.ccompiler import show_compilers show_compilers() class build(Command): description = "build everything needed to install" user_options = [ ('build-base=', 'b', "base directory for build library"), ('build-purelib=', None, "build directory for platform-neutral distributions"), ('build-platlib=', None, "build directory for platform-specific distributions"), ('build-lib=', None, "build directory for all distribution (defaults to either " + "build-purelib or build-platlib"), ('build-scripts=', None, "build directory for scripts"), ('build-temp=', 't', "temporary build directory"), ('plat-name=', 'p', "platform name to build for, if supported " "(default: %s)" % get_platform()), ('compiler=', 'c', "specify the compiler type"), ('parallel=', 'j', "number of parallel build jobs"), ('debug', 'g', "compile extensions and libraries with debugging information"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('executable=', 'e', "specify final destination interpreter path (build.py)"), ] boolean_options = ['debug', 'force'] help_options = [ ('help-compiler', None, "list available compilers", show_compilers), ] def initialize_options(self): self.build_base = 'build' # these are decided only after 'build_base' has its final value # (unless overridden by the user or client) self.build_purelib = None self.build_platlib = None self.build_lib = None self.build_temp = None self.build_scripts = None self.compiler = None self.plat_name = None self.debug = None self.force = 0 self.executable = None self.parallel = None def finalize_options(self): if self.plat_name is None: self.plat_name = get_platform() else: # plat-name only supported for windows (other platforms are # supported via ./configure flags, if at all). Avoid misleading # other platforms. if os.name != 'nt': raise DistutilsOptionError( "--plat-name only supported on Windows (try " "using './configure --help' on your platform)") plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2]) # Make it so Python 2.x and Python 2.x with --with-pydebug don't # share the same build directories. Doing so confuses the build # process for C modules if hasattr(sys, 'gettotalrefcount'): plat_specifier += '-pydebug' # 'build_purelib' and 'build_platlib' just default to 'lib' and # 'lib.<plat>' under the base build directory. We only use one of # them for a given distribution, though -- if self.build_purelib is None: self.build_purelib = os.path.join(self.build_base, 'lib') if self.build_platlib is None: self.build_platlib = os.path.join(self.build_base, 'lib' + plat_specifier) # 'build_lib' is the actual directory that we will use for this # particular module distribution -- if user didn't supply it, pick # one of 'build_purelib' or 'build_platlib'. 
        if self.build_lib is None:
            if self.distribution.ext_modules:
                self.build_lib = self.build_platlib
            else:
                self.build_lib = self.build_purelib

        # 'build_temp' -- temporary directory for compiler turds,
        # "build/temp.<plat>"
        if self.build_temp is None:
            self.build_temp = os.path.join(self.build_base,
                                           'temp' + plat_specifier)
        if self.build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts-%d.%d' % sys.version_info[:2])

        if self.executable is None and sys.executable:
            self.executable = os.path.normpath(sys.executable)

        if isinstance(self.parallel, str):
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise DistutilsOptionError("parallel should be an integer")

    def run(self):
        # Run all relevant sub-commands.  This will be some subset of:
        #  - build_py      - pure Python modules
        #  - build_clib    - standalone C libraries
        #  - build_ext     - Python extensions
        #  - build_scripts - (Python) scripts
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

    # -- Predicates for the sub-command list ---------------------------

    def has_pure_modules(self):
        return self.distribution.has_pure_modules()

    def has_c_libraries(self):
        return self.distribution.has_c_libraries()

    def has_ext_modules(self):
        return self.distribution.has_ext_modules()

    def has_scripts(self):
        return self.distribution.has_scripts()

    sub_commands = [('build_py',      has_pure_modules),
                    ('build_clib',    has_c_libraries),
                    ('build_ext',     has_ext_modules),
                    ('build_scripts', has_scripts),
                   ]
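
# ---------------------------------------------------------------------
# Editor's illustrative sketch (not part of build.py): how the defaults
# chosen in finalize_options() above come out for the running
# interpreter. Pure-Python distributions build into build_purelib;
# distributions with ext_modules build into build_platlib.
import os, sys
from distutils.util import get_platform

build_base = 'build'
plat_specifier = ".%s-%d.%d" % (get_platform(), *sys.version_info[:2])
print(os.path.join(build_base, 'lib'))                   # build_purelib
print(os.path.join(build_base, 'lib' + plat_specifier))  # build_platlib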
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/build_ext.py
"""distutils.command.build_ext Implements the Distutils 'build_ext' command, for building extension modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" import contextlib import os import re import sys from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version from distutils.sysconfig import get_config_h_filename from distutils.dep_util import newer_group from distutils.extension import Extension from distutils.util import get_platform from distutils import log from site import USER_BASE # An extension name is just a dot-separated list of Python NAMEs (ie. # the same as a fully-qualified module name). extension_name_re = re.compile \ (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') def show_compilers (): from distutils.ccompiler import show_compilers show_compilers() class build_ext(Command): description = "build C/C++ extensions (compile/link to build directory)" # XXX thoughts on how to deal with complex command-line options like # these, i.e. how to make it so fancy_getopt can suck them off the # command line and make it look like setup.py defined the appropriate # lists of tuples of what-have-you. # - each command needs a callback to process its command-line options # - Command.__init__() needs access to its share of the whole # command line (must ultimately come from # Distribution.parse_command_line()) # - it then calls the current command class' option-parsing # callback to deal with weird options like -D, which have to # parse the option text and churn out some custom data # structure # - that data structure (in this case, a list of 2-tuples) # will then be present in the command object by the time # we get to finalize_options() (i.e. 
the constructor # takes care of both command-line and client options # in between initialize_options() and finalize_options()) sep_by = " (separated by '%s')" % os.pathsep user_options = [ ('build-lib=', 'b', "directory for compiled extension modules"), ('build-temp=', 't', "directory for temporary files (build by-products)"), ('plat-name=', 'p', "platform name to cross-compile for, if supported " "(default: %s)" % get_platform()), ('inplace', 'i', "ignore build-lib and put compiled extensions into the source " + "directory alongside your pure Python modules"), ('include-dirs=', 'I', "list of directories to search for header files" + sep_by), ('define=', 'D', "C preprocessor macros to define"), ('undef=', 'U', "C preprocessor macros to undefine"), ('libraries=', 'l', "external C libraries to link with"), ('library-dirs=', 'L', "directories to search for external C libraries" + sep_by), ('rpath=', 'R', "directories to search for shared C libraries at runtime"), ('link-objects=', 'O', "extra explicit link objects to include in the link"), ('debug', 'g', "compile/link with debugging information"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('compiler=', 'c', "specify the compiler type"), ('parallel=', 'j', "number of parallel build jobs"), ('swig-cpp', None, "make SWIG create C++ files (default is C)"), ('swig-opts=', None, "list of SWIG command line options"), ('swig=', None, "path to the SWIG executable"), ('user', None, "add user include, library and rpath") ] boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] help_options = [ ('help-compiler', None, "list available compilers", show_compilers), ] def initialize_options(self): self.extensions = None self.build_lib = None self.plat_name = None self.build_temp = None self.inplace = 0 self.package = None self.include_dirs = None self.define = None self.undef = None self.libraries = None self.library_dirs = None self.rpath = None self.link_objects = None self.debug = None self.force = None self.compiler = None self.swig = None self.swig_cpp = None self.swig_opts = None self.user = None self.parallel = None def finalize_options(self): from distutils import sysconfig self.set_undefined_options('build', ('build_lib', 'build_lib'), ('build_temp', 'build_temp'), ('compiler', 'compiler'), ('debug', 'debug'), ('force', 'force'), ('parallel', 'parallel'), ('plat_name', 'plat_name'), ) if self.package is None: self.package = self.distribution.ext_package self.extensions = self.distribution.ext_modules # Make sure Python's include directories (for Python.h, pyconfig.h, # etc.) are in the include search path. py_include = sysconfig.get_python_inc() plat_py_include = sysconfig.get_python_inc(plat_specific=1) if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) # If in a virtualenv, add its include directory # Issue 16116 if sys.exec_prefix != sys.base_exec_prefix: self.include_dirs.append(os.path.join(sys.exec_prefix, 'include')) # Put the Python "system" include dir at the end, so that # any local include dirs take precedence. 
self.include_dirs.extend(py_include.split(os.path.pathsep)) if plat_py_include != py_include: self.include_dirs.extend( plat_py_include.split(os.path.pathsep)) self.ensure_string_list('libraries') self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset if self.libraries is None: self.libraries = [] if self.library_dirs is None: self.library_dirs = [] elif isinstance(self.library_dirs, str): self.library_dirs = self.library_dirs.split(os.pathsep) if self.rpath is None: self.rpath = [] elif isinstance(self.rpath, str): self.rpath = self.rpath.split(os.pathsep) # for extensions under windows use different directories # for Release and Debug builds. # also Python's library directory must be appended to library_dirs if os.name == 'nt': # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if sys.base_exec_prefix != sys.prefix: # Issue 16116 self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: self.build_temp = os.path.join(self.build_temp, "Release") # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree self.include_dirs.append(os.path.dirname(get_config_h_filename())) _sys_home = getattr(sys, '_home', None) if _sys_home: self.library_dirs.append(_sys_home) # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = 'win32' else: # win-amd64 suffix = self.plat_name[4:] new_lib = os.path.join(sys.exec_prefix, 'PCbuild') if suffix: new_lib = os.path.join(new_lib, suffix) self.library_dirs.append(new_lib) # For extensions under Cygwin, Python's library directory must be # appended to library_dirs if sys.platform[:6] == 'cygwin': if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(os.path.join(sys.prefix, "lib", "python" + get_python_version(), "config")) else: # building python standard extensions self.library_dirs.append('.') # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs # See Issues: #1600860, #4366 if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if not sysconfig.python_build: # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) else: # building python standard extensions self.library_dirs.append('.') # The argument parsing will result in self.define being a string, but # it has to be a list of 2-tuples. All the preprocessor symbols # specified by the 'define' option will be set to '1'. Multiple # symbols can be separated with commas. if self.define: defines = self.define.split(',') self.define = [(symbol, '1') for symbol in defines] # The option for macros to undefine is also a string from the # option parsing, but has to be a list. Multiple symbols can also # be separated with commas here. 
if self.undef: self.undef = self.undef.split(',') if self.swig_opts is None: self.swig_opts = [] else: self.swig_opts = self.swig_opts.split(' ') # Finally add the user include and library directories if requested if self.user: user_include = os.path.join(USER_BASE, "include") user_lib = os.path.join(USER_BASE, "lib") if os.path.isdir(user_include): self.include_dirs.append(user_include) if os.path.isdir(user_lib): self.library_dirs.append(user_lib) self.rpath.append(user_lib) if isinstance(self.parallel, str): try: self.parallel = int(self.parallel) except ValueError: raise DistutilsOptionError("parallel should be an integer") def run(self): from distutils.ccompiler import new_compiler # 'self.extensions', as supplied by setup.py, is a list of # Extension instances. See the documentation for Extension (in # distutils.extension) for details. # # For backwards compatibility with Distutils 0.8.2 and earlier, we # also allow the 'extensions' list to be a list of tuples: # (ext_name, build_info) # where build_info is a dictionary containing everything that # Extension instances do except the name, with a few things being # differently named. We convert these 2-tuples to Extension # instances as needed. if not self.extensions: return # If we were asked to build any C/C++ libraries, make sure that the # directory where we put them is in the library search path for # linking extensions. if self.distribution.has_c_libraries(): build_clib = self.get_finalized_command('build_clib') self.libraries.extend(build_clib.get_library_names() or []) self.library_dirs.append(build_clib.build_clib) # Setup the CCompiler object that we'll use to do all the # compiling and linking self.compiler = new_compiler(compiler=self.compiler, verbose=self.verbose, dry_run=self.dry_run, force=self.force) customize_compiler(self.compiler) # If we are cross-compiling, init the compiler now (if we are not # cross-compiling, init would not hurt, but people may rely on # late initialization of compiler even if they shouldn't...) if os.name == 'nt' and self.plat_name != get_platform(): self.compiler.initialize(self.plat_name) # And make sure that any compile/link-related options (which might # come from the command-line or from the setup script) are set in # that CCompiler object -- that way, they automatically apply to # all compiling and linking done here. if self.include_dirs is not None: self.compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name, value) in self.define: self.compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro(macro) if self.libraries is not None: self.compiler.set_libraries(self.libraries) if self.library_dirs is not None: self.compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: self.compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: self.compiler.set_link_objects(self.link_objects) # Now actually compile and link everything. self.build_extensions() def check_extensions_list(self, extensions): """Ensure that the list of extensions (presumably provided as a command option 'extensions') is valid, i.e. it is a list of Extension objects. We also support the old-style list of 2-tuples, where the tuples are (ext_name, build_info), which are converted to Extension instances here. Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. 
""" if not isinstance(extensions, list): raise DistutilsSetupError( "'ext_modules' option must be a list of Extension instances") for i, ext in enumerate(extensions): if isinstance(ext, Extension): continue # OK! (assume type-checking done # by Extension constructor) if not isinstance(ext, tuple) or len(ext) != 2: raise DistutilsSetupError( "each element of 'ext_modules' option must be an " "Extension instance or 2-tuple") ext_name, build_info = ext log.warn("old-style (ext_name, build_info) tuple found in " "ext_modules for extension '%s' " "-- please convert to Extension instance", ext_name) if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)): raise DistutilsSetupError( "first element of each tuple in 'ext_modules' " "must be the extension name (a string)") if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'ext_modules' " "must be a dictionary (build info)") # OK, the (ext_name, build_info) dict is type-safe: convert it # to an Extension instance. ext = Extension(ext_name, build_info['sources']) # Easy stuff: one-to-one mapping from dict elements to # instance attributes. for key in ('include_dirs', 'library_dirs', 'libraries', 'extra_objects', 'extra_compile_args', 'extra_link_args'): val = build_info.get(key) if val is not None: setattr(ext, key, val) # Medium-easy stuff: same syntax/semantics, different names. ext.runtime_library_dirs = build_info.get('rpath') if 'def_file' in build_info: log.warn("'def_file' element of build info dict " "no longer supported") # Non-trivial stuff: 'macros' split into 'define_macros' # and 'undef_macros'. macros = build_info.get('macros') if macros: ext.define_macros = [] ext.undef_macros = [] for macro in macros: if not (isinstance(macro, tuple) and len(macro) in (1, 2)): raise DistutilsSetupError( "'macros' element of build info dict " "must be 1- or 2-tuple") if len(macro) == 1: ext.undef_macros.append(macro[0]) elif len(macro) == 2: ext.define_macros.append(macro) extensions[i] = ext def get_source_files(self): self.check_extensions_list(self.extensions) filenames = [] # Wouldn't it be neat if we knew the names of header files too... for ext in self.extensions: filenames.extend(ext.sources) return filenames def get_outputs(self): # Sanity check the 'extensions' list -- can't assume this is being # done in the same run as a 'build_extensions()' call (in fact, we # can probably assume that it *isn't*!). self.check_extensions_list(self.extensions) # And build the list of output (built) filenames. Note that this # ignores the 'inplace' flag, and assumes everything goes in the # "build" tree. 
outputs = [] for ext in self.extensions: outputs.append(self.get_ext_fullpath(ext.name)) return outputs def build_extensions(self): # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) if self.parallel: self._build_extensions_parallel() else: self._build_extensions_serial() def _build_extensions_parallel(self): workers = self.parallel if self.parallel is True: workers = os.cpu_count() # may return None try: from concurrent.futures import ThreadPoolExecutor except ImportError: workers = None if workers is None: self._build_extensions_serial() return with ThreadPoolExecutor(max_workers=workers) as executor: futures = [executor.submit(self.build_extension, ext) for ext in self.extensions] for ext, fut in zip(self.extensions, futures): with self._filter_build_errors(ext): fut.result() def _build_extensions_serial(self): for ext in self.extensions: with self._filter_build_errors(ext): self.build_extension(ext) @contextlib.contextmanager def _filter_build_errors(self, ext): try: yield except (CCompilerError, DistutilsError, CompileError) as e: if not ext.optional: raise self.warn('building extension "%s" failed: %s' % (ext.name, e)) def build_extension(self, ext): sources = ext.sources if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( "in 'ext_modules' option (extension '%s'), " "'sources' must be present and must be " "a list of source filenames" % ext.name) # sort to make the resulting .so file build reproducible sources = sorted(sources) ext_path = self.get_ext_fullpath(ext.name) depends = sources + ext.depends if not (self.force or newer_group(depends, ext_path, 'newer')): log.debug("skipping '%s' extension (up-to-date)", ext.name) return else: log.info("building '%s' extension", ext.name) # First, scan the sources for SWIG definition files (.i), run # SWIG on 'em to create .c files, and modify the sources list # accordingly. sources = self.swig_sources(sources, ext) # Next, compile the source code to object files. # XXX not honouring 'define_macros' or 'undef_macros' -- the # CCompiler API needs to change to accommodate this, and I # want to do one thing at a time! # Two possible sources for extra compiler arguments: # - 'extra_compile_args' in Extension object # - CFLAGS environment variable (not particularly # elegant, but people seem to expect it and I # guess it's useful) # The environment variable should take precedence, and # any sensible compiler will give precedence to later # command line args. Hence we combine them in order: extra_args = ext.extra_compile_args or [] macros = ext.define_macros[:] for undef in ext.undef_macros: macros.append((undef,)) objects = self.compiler.compile(sources, output_dir=self.build_temp, macros=macros, include_dirs=ext.include_dirs, debug=self.debug, extra_postargs=extra_args, depends=ext.depends) # XXX outdated variable, kept here in case third-part code # needs it. self._built_objects = objects[:] # Now link the object files together into a "shared object" -- # of course, first we have to figure out all the other things # that go into the mix. 
if ext.extra_objects: objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] # Detect target language, if not provided language = ext.language or self.compiler.detect_language(sources) self.compiler.link_shared_object( objects, ext_path, libraries=self.get_libraries(ext), library_dirs=ext.library_dirs, runtime_library_dirs=ext.runtime_library_dirs, extra_postargs=extra_args, export_symbols=self.get_export_symbols(ext), debug=self.debug, build_temp=self.build_temp, target_lang=language) def swig_sources(self, sources, extension): """Walk the list of source files in 'sources', looking for SWIG interface (.i) files. Run SWIG on all that are found, and return a modified 'sources' list with SWIG source files replaced by the generated C (or C++) files. """ new_sources = [] swig_sources = [] swig_targets = {} # XXX this drops generated C/C++ files into the source tree, which # is fine for developers who want to distribute the generated # source -- but there should be an option to put SWIG output in # the temp dir. if self.swig_cpp: log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") if self.swig_cpp or ('-c++' in self.swig_opts) or \ ('-c++' in extension.swig_opts): target_ext = '.cpp' else: target_ext = '.c' for source in sources: (base, ext) = os.path.splitext(source) if ext == ".i": # SWIG interface file new_sources.append(base + '_wrap' + target_ext) swig_sources.append(source) swig_targets[source] = new_sources[-1] else: new_sources.append(source) if not swig_sources: return new_sources swig = self.swig or self.find_swig() swig_cmd = [swig, "-python"] swig_cmd.extend(self.swig_opts) if self.swig_cpp: swig_cmd.append("-c++") # Do not override commandline arguments if not self.swig_opts: for o in extension.swig_opts: swig_cmd.append(o) for source in swig_sources: target = swig_targets[source] log.info("swigging %s to %s", source, target) self.spawn(swig_cmd + ["-o", target, source]) return new_sources def find_swig(self): """Return the name of the SWIG executable. On Unix, this is just "swig" -- it should be in the PATH. Tries a bit harder on Windows. """ if os.name == "posix": return "swig" elif os.name == "nt": # Look for SWIG in its standard installation directory on # Windows (or so I presume!). If we find it there, great; # if not, act like Unix and assume it's in the PATH. for vers in ("1.3", "1.2", "1.1"): fn = os.path.join("c:\\swig%s" % vers, "swig.exe") if os.path.isfile(fn): return fn else: return "swig.exe" else: raise DistutilsPlatformError( "I don't know how to find (much less run) SWIG " "on platform '%s'" % os.name) # -- Name generators ----------------------------------------------- # (extension names, filenames, whatever) def get_ext_fullpath(self, ext_name): """Returns the path of the filename for a given extension. The file is located in `build_lib` or directly in the package (inplace option). 
""" fullname = self.get_ext_fullname(ext_name) modpath = fullname.split('.') filename = self.get_ext_filename(modpath[-1]) if not self.inplace: # no further work needed # returning : # build_dir/package/path/filename filename = os.path.join(*modpath[:-1]+[filename]) return os.path.join(self.build_lib, filename) # the inplace option requires to find the package directory # using the build_py command for that package = '.'.join(modpath[0:-1]) build_py = self.get_finalized_command('build_py') package_dir = os.path.abspath(build_py.get_package_dir(package)) # returning # package_dir/filename return os.path.join(package_dir, filename) def get_ext_fullname(self, ext_name): """Returns the fullname of a given extension name. Adds the `package.` prefix""" if self.package is None: return ext_name else: return self.package + '.' + ext_name def get_ext_filename(self, ext_name): r"""Convert the name of an extension (eg. "foo.bar") into the name of the file from which it will be loaded (eg. "foo/bar.so", or "foo\bar.pyd"). """ from distutils.sysconfig import get_config_var ext_path = ext_name.split('.') ext_suffix = get_config_var('EXT_SUFFIX') return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to export. This either uses 'ext.export_symbols' or, if it's not provided, "PyInit_" + module_name. Only relevant on Windows, where the .pyd file (DLL) must export the module "PyInit_" function. """ suffix = '_' + ext.name.split('.')[-1] try: # Unicode module name support as defined in PEP-489 # https://www.python.org/dev/peps/pep-0489/#export-hook-name suffix.encode('ascii') except UnicodeEncodeError: suffix = 'U' + suffix.encode('punycode').replace(b'-', b'_').decode('ascii') initfunc_name = "PyInit" + suffix if initfunc_name not in ext.export_symbols: ext.export_symbols.append(initfunc_name) return ext.export_symbols def get_libraries(self, ext): """Return the list of libraries to link against when building a shared extension. On most platforms, this is just 'ext.libraries'; on Windows, we add the Python library (eg. python20.dll). """ # The python library is always needed on Windows. For MSVC, this # is redundant, since the library is mentioned in a pragma in # pyconfig.h that MSVC groks. The other Windows compilers all seem # to need it mentioned explicitly, though, so that's what we do. # Append '_d' to the python import library on debug builds. if sys.platform == "win32": from distutils._msvccompiler import MSVCCompiler if not isinstance(self.compiler, MSVCCompiler): template = "python%d%d" if self.debug: template = template + '_d' pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib] else: # On Android only the main executable and LD_PRELOADs are considered # to be RTLD_GLOBAL, all the dependencies of the main executable # remain RTLD_LOCAL and so the shared libraries must be linked with # libpython when python is built with a shared python library (issue # bpo-21536). # On Cygwin (and if required, other POSIX-like platforms based on # Windows like MinGW) it is simply necessary that all symbols in # shared libraries are resolved at link time. 
            from distutils.sysconfig import get_config_var
            link_libpython = False
            if get_config_var('Py_ENABLE_SHARED'):
                # A native build on an Android device or on Cygwin
                if hasattr(sys, 'getandroidapilevel'):
                    link_libpython = True
                elif sys.platform == 'cygwin':
                    link_libpython = True
                elif '_PYTHON_HOST_PLATFORM' in os.environ:
                    # We are cross-compiling for one of the relevant platforms
                    if get_config_var('ANDROID_API_LEVEL') != 0:
                        link_libpython = True
                    elif get_config_var('MACHDEP') == 'cygwin':
                        link_libpython = True

            if link_libpython:
                ldversion = get_config_var('LDVERSION')
                return ext.libraries + ['python' + ldversion]

            return ext.libraries
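
# ---------------------------------------------------------------------
# Editor's illustrative sketch (not part of build_ext.py): a minimal
# setup script feeding the machinery above. 'spam' and 'spam.c' are
# hypothetical; the old-style tuple shown in the comment is what
# check_extensions_list() converts (with a warning) into an Extension.
from distutils.core import setup, Extension

ext = Extension('spam',
                sources=['spam.c'],
                define_macros=[('NDEBUG', '1')],   # e.g. -DNDEBUG=1
                undef_macros=['DEBUG'])            # e.g. -UDEBUG

# deprecated equivalent: ext_modules=[('spam', {'sources': ['spam.c']})]
setup(name='spam', version='1.0', ext_modules=[ext])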
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/config.py
"""distutils.command.config Implements the Distutils 'config' command, a (mostly) empty command class that exists mainly to be sub-classed by specific module distributions and applications. The idea is that while every "config" command is different, at least they're all named the same, and users always see "config" in the list of standard commands. Also, this is a good place to put common configure-like tasks: "try to compile this C code", or "figure out where this header file lives". """ import os, re from distutils.core import Command from distutils.errors import DistutilsExecError from distutils.sysconfig import customize_compiler from distutils import log LANG_EXT = {"c": ".c", "c++": ".cxx"} class config(Command): description = "prepare to build" user_options = [ ('compiler=', None, "specify the compiler type"), ('cc=', None, "specify the compiler executable"), ('include-dirs=', 'I', "list of directories to search for header files"), ('define=', 'D', "C preprocessor macros to define"), ('undef=', 'U', "C preprocessor macros to undefine"), ('libraries=', 'l', "external C libraries to link with"), ('library-dirs=', 'L', "directories to search for external C libraries"), ('noisy', None, "show every action (compile, link, run, ...) taken"), ('dump-source', None, "dump generated source files before attempting to compile them"), ] # The three standard command methods: since the "config" command # does nothing by default, these are empty. def initialize_options(self): self.compiler = None self.cc = None self.include_dirs = None self.libraries = None self.library_dirs = None # maximal output for now self.noisy = 1 self.dump_source = 1 # list of temporary files generated along-the-way that we have # to clean at some point self.temp_files = [] def finalize_options(self): if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] elif isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) if self.libraries is None: self.libraries = [] elif isinstance(self.libraries, str): self.libraries = [self.libraries] if self.library_dirs is None: self.library_dirs = [] elif isinstance(self.library_dirs, str): self.library_dirs = self.library_dirs.split(os.pathsep) def run(self): pass # Utility methods for actual "config" commands. The interfaces are # loosely based on Autoconf macros of similar names. Sub-classes # may use these freely. def _check_compiler(self): """Check that 'self.compiler' really is a CCompiler object; if not, make it one. """ # We do this late, and only on-demand, because this is an expensive # import. 
from distutils.ccompiler import CCompiler, new_compiler if not isinstance(self.compiler, CCompiler): self.compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=1) customize_compiler(self.compiler) if self.include_dirs: self.compiler.set_include_dirs(self.include_dirs) if self.libraries: self.compiler.set_libraries(self.libraries) if self.library_dirs: self.compiler.set_library_dirs(self.library_dirs) def _gen_temp_sourcefile(self, body, headers, lang): filename = "_configtest" + LANG_EXT[lang] with open(filename, "w") as file: if headers: for header in headers: file.write("#include <%s>\n" % header) file.write("\n") file.write(body) if body[-1] != "\n": file.write("\n") return filename def _preprocess(self, body, headers, include_dirs, lang): src = self._gen_temp_sourcefile(body, headers, lang) out = "_configtest.i" self.temp_files.extend([src, out]) self.compiler.preprocess(src, out, include_dirs=include_dirs) return (src, out) def _compile(self, body, headers, include_dirs, lang): src = self._gen_temp_sourcefile(body, headers, lang) if self.dump_source: dump_file(src, "compiling '%s':" % src) (obj,) = self.compiler.object_filenames([src]) self.temp_files.extend([src, obj]) self.compiler.compile([src], include_dirs=include_dirs) return (src, obj) def _link(self, body, headers, include_dirs, libraries, library_dirs, lang): (src, obj) = self._compile(body, headers, include_dirs, lang) prog = os.path.splitext(os.path.basename(src))[0] self.compiler.link_executable([obj], prog, libraries=libraries, library_dirs=library_dirs, target_lang=lang) if self.compiler.exe_extension is not None: prog = prog + self.compiler.exe_extension self.temp_files.append(prog) return (src, obj, prog) def _clean(self, *filenames): if not filenames: filenames = self.temp_files self.temp_files = [] log.info("removing: %s", ' '.join(filenames)) for filename in filenames: try: os.remove(filename) except OSError: pass # XXX these ignore the dry-run flag: what to do, what to do? even if # you want a dry-run build, you still need some sort of configuration # info. My inclination is to make it up to the real config command to # consult 'dry_run', and assume a default (minimal) configuration if # true. The problem with trying to do it here is that you'd have to # return either true or false from all the 'try' methods, neither of # which is correct. # XXX need access to the header search path and maybe default macros. def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file from 'body' (a string containing lines of C/C++ code) and 'headers' (a list of header files to include) and run it through the preprocessor. Return true if the preprocessor succeeded, false if there were any errors. ('body' probably isn't of much use, but what the heck.) """ from distutils.ccompiler import CompileError self._check_compiler() ok = True try: self._preprocess(body, headers, include_dirs, lang) except CompileError: ok = False self._clean() return ok def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file (just like 'try_cpp()'), run it through the preprocessor, and return true if any line of the output matches 'pattern'. 'pattern' should either be a compiled regex object or a string containing a regex. If both 'body' and 'headers' are None, preprocesses an empty file -- which can be useful to determine the symbols the preprocessor and compiler set by default. 
""" self._check_compiler() src, out = self._preprocess(body, headers, include_dirs, lang) if isinstance(pattern, str): pattern = re.compile(pattern) with open(out) as file: match = False while True: line = file.readline() if line == '': break if pattern.search(line): match = True break self._clean() return match def try_compile(self, body, headers=None, include_dirs=None, lang="c"): """Try to compile a source file built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError self._check_compiler() try: self._compile(body, headers, include_dirs, lang) ok = True except CompileError: ok = False log.info(ok and "success!" or "failure.") self._clean() return ok def try_link(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c"): """Try to compile and link a source file, built from 'body' and 'headers', to executable form. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError self._check_compiler() try: self._link(body, headers, include_dirs, libraries, library_dirs, lang) ok = True except (CompileError, LinkError): ok = False log.info(ok and "success!" or "failure.") self._clean() return ok def try_run(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c"): """Try to compile, link to an executable, and run a program built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError self._check_compiler() try: src, obj, exe = self._link(body, headers, include_dirs, libraries, library_dirs, lang) self.spawn([exe]) ok = True except (CompileError, LinkError, DistutilsExecError): ok = False log.info(ok and "success!" or "failure.") self._clean() return ok # -- High-level methods -------------------------------------------- # (these are the ones that are actually likely to be useful # when implementing a real-world config command!) def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, call=0): """Determine if function 'func' is available by constructing a source file that refers to 'func', and compiles and links it. If everything succeeds, returns true; otherwise returns false. The constructed source file starts out by including the header files listed in 'headers'. If 'decl' is true, it then declares 'func' (as "int func()"); you probably shouldn't supply 'headers' and set 'decl' true in the same call, or you might get errors about a conflicting declarations for 'func'. Finally, the constructed 'main()' function either references 'func' or (if 'call' is true) calls it. 'libraries' and 'library_dirs' are used when linking. """ self._check_compiler() body = [] if decl: body.append("int %s ();" % func) body.append("int main () {") if call: body.append(" %s();" % func) else: body.append(" %s;" % func) body.append("}") body = "\n".join(body) + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) def check_lib(self, library, library_dirs=None, headers=None, include_dirs=None, other_libraries=[]): """Determine if 'library' is available to be linked against, without actually checking that any particular symbols are provided by it. 'headers' will be used in constructing the source file to be compiled, but the only effect of this is to check if all the header files listed are available. 
        Any libraries listed in 'other_libraries' will be included in
        the link, in case 'library' has symbols that depend on other
        libraries.
        """
        self._check_compiler()
        return self.try_link("int main (void) { }", headers, include_dirs,
                             [library] + other_libraries, library_dirs)

    def check_header(self, header, include_dirs=None, library_dirs=None,
                     lang="c"):
        """Determine if the system header file named by 'header' exists
        and can be found by the preprocessor; return true if so, false
        otherwise.
        """
        return self.try_cpp(body="/* No body */", headers=[header],
                            include_dirs=include_dirs)


def dump_file(filename, head=None):
    """Dump the contents of a file through log.info.

    If 'head' is not None, it is logged before the file content.
    """
    if head is None:
        log.info('%s', filename)
    else:
        log.info(head)
    file = open(filename)
    try:
        log.info(file.read())
    finally:
        file.close()
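
# ---------------------------------------------------------------------
# Editor's illustrative sketch (not part of config.py): a hypothetical
# config subclass exercising the high-level helpers above. The zlib
# names are examples only; results are logged, not returned to setup().
from distutils.command.config import config

class zlib_config(config):
    def run(self):
        if self.check_header('zlib.h'):
            # header found -- can we link against the library and call it?
            self.check_lib('z')
            self.check_func('inflateEnd', headers=['zlib.h'],
                            libraries=['z'], decl=0, call=1)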
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/clean.py
"""distutils.command.clean Implements the Distutils 'clean' command.""" # contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18 import os from distutils.core import Command from distutils.dir_util import remove_tree from distutils import log class clean(Command): description = "clean up temporary files from 'build' command" user_options = [ ('build-base=', 'b', "base build directory (default: 'build.build-base')"), ('build-lib=', None, "build directory for all modules (default: 'build.build-lib')"), ('build-temp=', 't', "temporary build directory (default: 'build.build-temp')"), ('build-scripts=', None, "build directory for scripts (default: 'build.build-scripts')"), ('bdist-base=', None, "temporary directory for built distributions"), ('all', 'a', "remove all build output, not just temporary by-products") ] boolean_options = ['all'] def initialize_options(self): self.build_base = None self.build_lib = None self.build_temp = None self.build_scripts = None self.bdist_base = None self.all = None def finalize_options(self): self.set_undefined_options('build', ('build_base', 'build_base'), ('build_lib', 'build_lib'), ('build_scripts', 'build_scripts'), ('build_temp', 'build_temp')) self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) def run(self): # remove the build/temp.<plat> directory (unless it's already # gone) if os.path.exists(self.build_temp): remove_tree(self.build_temp, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", self.build_temp) if self.all: # remove build directories for directory in (self.build_lib, self.bdist_base, self.build_scripts): if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: log.warn("'%s' does not exist -- can't clean it", directory) # just for the heck of it, try to remove the base build directory: # we might have emptied it right now, but if not we don't care if not self.dry_run: try: os.rmdir(self.build_base) log.info("removing '%s'", self.build_base) except OSError: pass
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/check.py
"""distutils.command.check Implements the Distutils 'check' command. """ from distutils.core import Command from distutils.errors import DistutilsSetupError try: # docutils is installed from docutils.utils import Reporter from docutils.parsers.rst import Parser from docutils import frontend from docutils import nodes class SilentReporter(Reporter): def __init__(self, source, report_level, halt_level, stream=None, debug=0, encoding='ascii', error_handler='replace'): self.messages = [] Reporter.__init__(self, source, report_level, halt_level, stream, debug, encoding, error_handler) def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) return nodes.system_message(message, level=level, type=self.levels[level], *children, **kwargs) HAS_DOCUTILS = True except Exception: # Catch all exceptions because exceptions besides ImportError probably # indicate that docutils is not ported to Py3k. HAS_DOCUTILS = False class check(Command): """This command checks the meta-data of the package. """ description = ("perform some checks on the package") user_options = [('metadata', 'm', 'Verify meta-data'), ('restructuredtext', 'r', ('Checks if long string meta-data syntax ' 'are reStructuredText-compliant')), ('strict', 's', 'Will exit with an error if a check fails')] boolean_options = ['metadata', 'restructuredtext', 'strict'] def initialize_options(self): """Sets default values for options.""" self.restructuredtext = 0 self.metadata = 1 self.strict = 0 self._warnings = 0 def finalize_options(self): pass def warn(self, msg): """Counts the number of warnings that occurs.""" self._warnings += 1 return Command.warn(self, msg) def run(self): """Runs the command.""" # perform the various tests if self.metadata: self.check_metadata() if self.restructuredtext: if HAS_DOCUTILS: self.check_restructuredtext() elif self.strict: raise DistutilsSetupError('The docutils package is needed.') # let's raise an error in strict mode, if we have at least # one warning if self.strict and self._warnings > 0: raise DistutilsSetupError('Please correct your package.') def check_metadata(self): """Ensures that all required elements of meta-data are supplied. Required fields: name, version, URL Recommended fields: (author and author_email) or (maintainer and maintainer_email)) Warns if any are missing. 
""" metadata = self.distribution.metadata missing = [] for attr in ('name', 'version', 'url'): if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: self.warn("missing required meta-data: %s" % ', '.join(missing)) if metadata.author: if not metadata.author_email: self.warn("missing meta-data: if 'author' supplied, " + "'author_email' should be supplied too") elif metadata.maintainer: if not metadata.maintainer_email: self.warn("missing meta-data: if 'maintainer' supplied, " + "'maintainer_email' should be supplied too") else: self.warn("missing meta-data: either (author and author_email) " + "or (maintainer and maintainer_email) " + "should be supplied") def check_restructuredtext(self): """Checks if the long string fields are reST-compliant.""" data = self.distribution.get_long_description() for warning in self._check_rst_data(data): line = warning[-1].get('line') if line is None: warning = warning[1] else: warning = '%s (line %s)' % (warning[1], line) self.warn(warning) def _check_rst_data(self, data): """Returns warnings when the provided data doesn't compile.""" # the include and csv_table directives need this to be a path source_path = self.distribution.script_name or 'setup.py' parser = Parser() settings = frontend.OptionParser(components=(Parser,)).get_default_values() settings.tab_width = 4 settings.pep_references = None settings.rfc_references = None reporter = SilentReporter(source_path, settings.report_level, settings.halt_level, stream=settings.warning_stream, debug=settings.debug, encoding=settings.error_encoding, error_handler=settings.error_encoding_error_handler) document = nodes.document(settings, reporter, source=source_path) document.note_source(source_path, -1) try: parser.parse(data, document) except AttributeError as e: reporter.messages.append( (-1, 'Could not finish the parsing: %s.' % e, '', {})) return reporter.messages
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install_scripts.py
"""distutils.command.install_scripts Implements the Distutils 'install_scripts' command, for installing Python scripts.""" # contributed by Bastian Kleineidam import os from distutils.core import Command from distutils import log from stat import ST_MODE class install_scripts(Command): description = "install scripts (Python or otherwise)" user_options = [ ('install-dir=', 'd', "directory to install scripts to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'skip-build'] def initialize_options(self): self.install_dir = None self.force = 0 self.build_dir = None self.skip_build = None def finalize_options(self): self.set_undefined_options('build', ('build_scripts', 'build_dir')) self.set_undefined_options('install', ('install_scripts', 'install_dir'), ('force', 'force'), ('skip_build', 'skip_build'), ) def run(self): if not self.skip_build: self.run_command('build_scripts') self.outfiles = self.copy_tree(self.build_dir, self.install_dir) if os.name == 'posix': # Set the executable bits (owner, group, and world) on # all the scripts we just installed. for file in self.get_outputs(): if self.dry_run: log.info("changing mode of %s", file) else: mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777 log.info("changing mode of %s to %o", file, mode) os.chmod(file, mode) def get_inputs(self): return self.distribution.scripts or [] def get_outputs(self): return self.outfiles or []
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/upload.py
""" distutils.command.upload Implements the Distutils 'upload' subcommand (upload package to a package index). """ import os import io import hashlib from base64 import standard_b64encode from urllib.request import urlopen, Request, HTTPError from urllib.parse import urlparse from distutils.errors import DistutilsError, DistutilsOptionError from distutils.core import PyPIRCCommand from distutils.spawn import spawn from distutils import log # PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256) # https://bugs.python.org/issue40698 _FILE_CONTENT_DIGESTS = { "md5_digest": getattr(hashlib, "md5", None), "sha256_digest": getattr(hashlib, "sha256", None), "blake2_256_digest": getattr(hashlib, "blake2b", None), } class upload(PyPIRCCommand): description = "upload binary package to PyPI" user_options = PyPIRCCommand.user_options + [ ('sign', 's', 'sign files to upload using gpg'), ('identity=', 'i', 'GPG identity used to sign files'), ] boolean_options = PyPIRCCommand.boolean_options + ['sign'] def initialize_options(self): PyPIRCCommand.initialize_options(self) self.username = '' self.password = '' self.show_response = 0 self.sign = False self.identity = None def finalize_options(self): PyPIRCCommand.finalize_options(self) if self.identity and not self.sign: raise DistutilsOptionError( "Must use --sign for --identity to have meaning" ) config = self._read_pypirc() if config != {}: self.username = config['username'] self.password = config['password'] self.repository = config['repository'] self.realm = config['realm'] # getting the password from the distribution # if previously set by the register command if not self.password and self.distribution.password: self.password = self.distribution.password def run(self): if not self.distribution.dist_files: msg = ("Must create and upload files in one command " "(e.g. 
setup.py sdist upload)") raise DistutilsOptionError(msg) for command, pyversion, filename in self.distribution.dist_files: self.upload_file(command, pyversion, filename) def upload_file(self, command, pyversion, filename): # Makes sure the repository URL is compliant schema, netloc, url, params, query, fragments = \ urlparse(self.repository) if params or query or fragments: raise AssertionError("Incompatible url %s" % self.repository) if schema not in ('http', 'https'): raise AssertionError("unsupported schema " + schema) # Sign if requested if self.sign: gpg_args = ["gpg", "--detach-sign", "-a", filename] if self.identity: gpg_args[2:2] = ["--local-user", self.identity] spawn(gpg_args, dry_run=self.dry_run) # Fill in the data - send all the meta-data in case we need to # register a new release f = open(filename,'rb') try: content = f.read() finally: f.close() meta = self.distribution.metadata data = { # action ':action': 'file_upload', 'protocol_version': '1', # identify release 'name': meta.get_name(), 'version': meta.get_version(), # file content 'content': (os.path.basename(filename),content), 'filetype': command, 'pyversion': pyversion, # additional meta-data 'metadata_version': '1.0', 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } data['comment'] = '' # file content digests for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items(): if digest_cons is None: continue try: data[digest_name] = digest_cons(content).hexdigest() except ValueError: # hash digest not available or blocked by security policy pass if self.sign: with open(filename + ".asc", "rb") as f: data['gpg_signature'] = (os.path.basename(filename) + ".asc", f.read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') # The exact encoding of the authentication string is debated. # Anyway PyPI only accepts ascii for both username or password. 
auth = "Basic " + standard_b64encode(user_pass).decode('ascii') # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = b'\r\n--' + boundary.encode('ascii') end_boundary = sep_boundary + b'--\r\n' body = io.BytesIO() for key, value in data.items(): title = '\r\nContent-Disposition: form-data; name="%s"' % key # handle multiple entries for the same name if not isinstance(value, list): value = [value] for value in value: if type(value) is tuple: title += '; filename="%s"' % value[0] value = value[1] else: value = str(value).encode('utf-8') body.write(sep_boundary) body.write(title.encode('utf-8')) body.write(b"\r\n\r\n") body.write(value) body.write(end_boundary) body = body.getvalue() msg = "Submitting %s to %s" % (filename, self.repository) self.announce(msg, log.INFO) # build the Request headers = { 'Content-type': 'multipart/form-data; boundary=%s' % boundary, 'Content-length': str(len(body)), 'Authorization': auth, } request = Request(self.repository, data=body, headers=headers) # send the data try: result = urlopen(request) status = result.getcode() reason = result.msg except HTTPError as e: status = e.code reason = e.msg except OSError as e: self.announce(str(e), log.ERROR) raise if status == 200: self.announce('Server response (%s): %s' % (status, reason), log.INFO) if self.show_response: text = self._read_pypi_response(result) msg = '\n'.join(('-' * 75, text, '-' * 75)) self.announce(msg, log.INFO) else: msg = 'Upload failed (%s): %s' % (status, reason) self.announce(msg, log.ERROR) raise DistutilsError(msg)
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/register.py
"""distutils.command.register Implements the Distutils 'register' command (register with the repository). """ # created 2002/10/21, Richard Jones import getpass import io import urllib.parse, urllib.request from warnings import warn from distutils.core import PyPIRCCommand from distutils.errors import * from distutils import log class register(PyPIRCCommand): description = ("register the distribution with the Python package index") user_options = PyPIRCCommand.user_options + [ ('list-classifiers', None, 'list the valid Trove classifiers'), ('strict', None , 'Will stop the registering if the meta-data are not fully compliant') ] boolean_options = PyPIRCCommand.boolean_options + [ 'verify', 'list-classifiers', 'strict'] sub_commands = [('check', lambda self: True)] def initialize_options(self): PyPIRCCommand.initialize_options(self) self.list_classifiers = 0 self.strict = 0 def finalize_options(self): PyPIRCCommand.finalize_options(self) # setting options for the `check` subcommand check_options = {'strict': ('register', self.strict), 'restructuredtext': ('register', 1)} self.distribution.command_options['check'] = check_options def run(self): self.finalize_options() self._set_config() # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) if self.dry_run: self.verify_metadata() elif self.list_classifiers: self.classifiers() else: self.send_metadata() def check_metadata(self): """Deprecated API.""" warn("distutils.command.register.check_metadata is deprecated, \ use the check command instead", PendingDeprecationWarning) check = self.distribution.get_command_obj('check') check.ensure_finalized() check.strict = self.strict check.restructuredtext = 1 check.run() def _set_config(self): ''' Reads the configuration file and set attributes. ''' config = self._read_pypirc() if config != {}: self.username = config['username'] self.password = config['password'] self.repository = config['repository'] self.realm = config['realm'] self.has_config = True else: if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): raise ValueError('%s not found in .pypirc' % self.repository) if self.repository == 'pypi': self.repository = self.DEFAULT_REPOSITORY self.has_config = False def classifiers(self): ''' Fetch the list of classifiers from the server. ''' url = self.repository+'?:action=list_classifiers' response = urllib.request.urlopen(url) log.info(self._read_pypi_response(response)) def verify_metadata(self): ''' Send the metadata to the package index server to be checked. ''' # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) log.info('Server response (%s): %s', code, result) def send_metadata(self): ''' Send the metadata to the package index server. Well, do the following: 1. figure who the user is, and then 2. send the data as a Basic auth'ed POST. First we try to read the username/password from $HOME/.pypirc, which is a ConfigParser-formatted file with a section [distutils] containing username and password entries (both in clear text). Eg: [distutils] index-servers = pypi [pypi] username: fred password: sekrit Otherwise, to figure who the user is, we offer the user three choices: 1. use existing login, 2. register as a new user, or 3. set the password to a random string and email the user. 
''' # see if we can short-cut and get the username/password from the # config if self.has_config: choice = '1' username = self.username password = self.password else: choice = 'x' username = password = '' # get the user's login info choices = '1 2 3 4'.split() while choice not in choices: self.announce('''\ We need to know who you are, so please choose either: 1. use your existing login, 2. register as a new user, 3. have the server generate a new password for you (and email it to you), or 4. quit Your selection [default 1]: ''', log.INFO) choice = input() if not choice: choice = '1' elif choice not in choices: print('Please choose one of the four options!') if choice == '1': # get the username and password while not username: username = input('Username: ') while not password: password = getpass.getpass('Password: ') # set up the authentication auth = urllib.request.HTTPPasswordMgr() host = urllib.parse.urlparse(self.repository)[1] auth.add_password(self.realm, host, username, password) # send the info to the server and report the result code, result = self.post_to_server(self.build_post_data('submit'), auth) self.announce('Server response (%s): %s' % (code, result), log.INFO) # possibly save the login if code == 200: if self.has_config: # sharing the password in the distribution instance # so the upload command can reuse it self.distribution.password = password else: self.announce(('I can store your PyPI login so future ' 'submissions will be faster.'), log.INFO) self.announce('(the login will be stored in %s)' % \ self._get_rc_file(), log.INFO) choice = 'X' while choice.lower() not in 'yn': choice = input('Save your login (y/N)?') if not choice: choice = 'n' if choice.lower() == 'y': self._store_pypirc(username, password) elif choice == '2': data = {':action': 'user'} data['name'] = data['password'] = data['email'] = '' data['confirm'] = None while not data['name']: data['name'] = input('Username: ') while data['password'] != data['confirm']: while not data['password']: data['password'] = getpass.getpass('Password: ') while not data['confirm']: data['confirm'] = getpass.getpass(' Confirm: ') if data['password'] != data['confirm']: data['password'] = '' data['confirm'] = None print("Password and confirm don't match!") while not data['email']: data['email'] = input(' EMail: ') code, result = self.post_to_server(data) if code != 200: log.info('Server response (%s): %s', code, result) else: log.info('You will receive an email shortly.') log.info(('Follow the instructions in it to ' 'complete registration.')) elif choice == '3': data = {':action': 'password_reset'} data['email'] = '' while not data['email']: data['email'] = input('Your email address: ') code, result = self.post_to_server(data) log.info('Server response (%s): %s', code, result) def build_post_data(self, action): # figure the data to send - the metadata plus some additional # information used by the package server meta = self.distribution.metadata data = { ':action': action, 'metadata_version' : '1.0', 'name': meta.get_name(), 'version': meta.get_version(), 'summary': meta.get_description(), 'home_page': meta.get_url(), 'author': meta.get_contact(), 'author_email': meta.get_contact_email(), 'license': meta.get_licence(), 'description': meta.get_long_description(), 'keywords': meta.get_keywords(), 'platform': meta.get_platforms(), 'classifiers': meta.get_classifiers(), 'download_url': meta.get_download_url(), # PEP 314 'provides': meta.get_provides(), 'requires': meta.get_requires(), 'obsoletes': meta.get_obsoletes(), } if 
data['provides'] or data['requires'] or data['obsoletes']: data['metadata_version'] = '1.1' return data def post_to_server(self, data, auth=None): ''' Post a query to the server, and return a string response. ''' if 'name' in data: self.announce('Registering %s to %s' % (data['name'], self.repository), log.INFO) # Build up the MIME payload for the urllib2 POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary end_boundary = sep_boundary + '--' body = io.StringIO() for key, value in data.items(): # handle multiple entries for the same name if type(value) not in (type([]), type( () )): value = [value] for value in value: value = str(value) body.write(sep_boundary) body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write("\n\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body.write("\n") body = body.getvalue().encode("utf-8") # build the Request headers = { 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, 'Content-length': str(len(body)) } req = urllib.request.Request(self.repository, body, headers) # handle HTTP and include the Basic Auth handler opener = urllib.request.build_opener( urllib.request.HTTPBasicAuthHandler(password_mgr=auth) ) data = '' try: result = opener.open(req) except urllib.error.HTTPError as e: if self.show_response: data = e.fp.read() result = e.code, e.msg except urllib.error.URLError as e: result = 500, str(e) else: if self.show_response: data = self._read_pypi_response(result) result = 200, 'OK' if self.show_response: msg = '\n'.join(('-' * 75, data, '-' * 75)) self.announce(msg, log.INFO) return result
0
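# --- illustrative sketch, not part of the original register.py above ---
# Why: post_to_server() hand-rolls a multipart/form-data body; this
# standalone rebuild of that loop lets the encoding be inspected without a
# network or a real package index. Field names and the payload are
# hypothetical; only the boundary constant is taken from the file above.
import io

def encode_multipart(data, boundary='--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'):
    # One form-data part per value; list values repeat under the same name,
    # mirroring the loop in register.post_to_server().
    sep_boundary = '\n--' + boundary
    end_boundary = sep_boundary + '--'
    body = io.StringIO()
    for key, values in data.items():
        if not isinstance(values, (list, tuple)):
            values = [values]
        for value in values:
            body.write(sep_boundary)
            body.write('\nContent-Disposition: form-data; name="%s"' % key)
            body.write('\n\n')
            body.write(str(value))
    body.write(end_boundary)
    body.write('\n')
    return body.getvalue().encode('utf-8')

if __name__ == '__main__':
    # e.g. the ':action' field register sends for a metadata verify
    print(encode_multipart({':action': 'verify', 'name': 'example-dist'}).decode('utf-8'))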
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/bdist_wininst.py
"""distutils.command.bdist_wininst Implements the Distutils 'bdist_wininst' command: create a windows installer exe-program.""" import os import sys import warnings from distutils.core import Command from distutils.util import get_platform from distutils.dir_util import remove_tree from distutils.errors import * from distutils.sysconfig import get_python_version from distutils import log class bdist_wininst(Command): description = "create an executable installer for MS Windows" user_options = [('bdist-dir=', None, "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('target-version=', None, "require a specific python version" + " on the target system"), ('no-target-compile', 'c', "do not compile .py to .pyc on the target system"), ('no-target-optimize', 'o', "do not compile .py to .pyo (optimized) " "on the target system"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('bitmap=', 'b', "bitmap to use for the installer instead of python-powered logo"), ('title=', 't', "title to display on the installer background instead of default"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('install-script=', None, "basename of installation script to be run after " "installation or before deinstallation"), ('pre-install-script=', None, "Fully qualified filename of a script to be run before " "any files are installed. This script need not be in the " "distribution"), ('user-access-control=', None, "specify Vista's UAC handling - 'none'/default=no " "handling, 'auto'=use UAC if target Python installed for " "all users, 'force'=always use UAC"), ] boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', 'skip-build'] # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows _unsupported = (sys.platform != "win32") def __init__(self, *args, **kw): super().__init__(*args, **kw) warnings.warn("bdist_wininst command is deprecated since Python 3.8, " "use bdist_wheel (wheel packages) instead", DeprecationWarning, 2) def initialize_options(self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.no_target_compile = 0 self.no_target_optimize = 0 self.target_version = None self.dist_dir = None self.bitmap = None self.title = None self.skip_build = None self.install_script = None self.pre_install_script = None self.user_access_control = None def finalize_options(self): self.set_undefined_options('bdist', ('skip_build', 'skip_build')) if self.bdist_dir is None: if self.skip_build and self.plat_name: # If build is skipped and plat_name is overridden, bdist will # not see the correct 'plat_name' - so set that up manually. 
bdist = self.distribution.get_command_obj('bdist') bdist.plat_name = self.plat_name # next the command will be initialized using that name bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'wininst') if not self.target_version: self.target_version = "" if not self.skip_build and self.distribution.has_ext_modules(): short_version = get_python_version() if self.target_version and self.target_version != short_version: raise DistutilsOptionError( "target version can only be %s, or the '--skip-build'" \ " option must be specified" % (short_version,)) self.target_version = short_version self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name'), ) if self.install_script: for script in self.distribution.scripts: if self.install_script == os.path.basename(script): break else: raise DistutilsOptionError( "install_script '%s' not found in scripts" % self.install_script) def run(self): if (sys.platform != "win32" and (self.distribution.has_ext_modules() or self.distribution.has_c_libraries())): raise DistutilsPlatformError \ ("distribution contains extensions and/or C libraries; " "must be compiled on a Windows 32 platform") if not self.skip_build: self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.root = self.bdist_dir install.skip_build = self.skip_build install.warn_dir = 0 install.plat_name = self.plat_name install_lib = self.reinitialize_command('install_lib') # we do not want to include pyc or pyo files install_lib.compile = 0 install_lib.optimize = 0 if self.distribution.has_ext_modules(): # If we are building an installer for a Python version other # than the one we are currently running, then we need to ensure # our build_lib reflects the other Python version rather than ours. # Note that for target_version!=sys.version, we must have skipped the # build step, so there is no issue with enforcing the build of this # version. target_version = self.target_version if not target_version: assert self.skip_build, "Should have already checked this" target_version = '%d.%d' % sys.version_info[:2] plat_specifier = ".%s-%s" % (self.plat_name, target_version) build = self.get_finalized_command('build') build.build_lib = os.path.join(build.build_base, 'lib' + plat_specifier) # Use a custom scheme for the zip-file, because we have to decide # at installation time which scheme to use. for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'): value = key.upper() if key == 'headers': value = value + '/Include/$dist_name' setattr(install, 'install_' + key, value) log.info("installing to %s", self.bdist_dir) install.ensure_finalized() # avoid warning of 'install_lib' about installing # into a directory not in sys.path sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) install.run() del sys.path[0] # And make an archive relative to the root of the # pseudo-installation tree. 
from tempfile import mktemp archive_basename = mktemp() fullname = self.distribution.get_fullname() arcname = self.make_archive(archive_basename, "zip", root_dir=self.bdist_dir) # create an exe containing the zip-file self.create_exe(arcname, fullname, self.bitmap) if self.distribution.has_ext_modules(): pyversion = get_python_version() else: pyversion = 'any' self.distribution.dist_files.append(('bdist_wininst', pyversion, self.get_installer_filename(fullname))) # remove the zip-file again log.debug("removing temporary file '%s'", arcname) os.remove(arcname) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) def get_inidata(self): # Return data describing the installation. lines = [] metadata = self.distribution.metadata # Write the [metadata] section. lines.append("[metadata]") # 'info' will be displayed in the installer's dialog box, # describing the items to be installed. info = (metadata.long_description or '') + '\n' # Escape newline characters def escape(s): return s.replace("\n", "\\n") for name in ["author", "author_email", "description", "maintainer", "maintainer_email", "name", "url", "version"]: data = getattr(metadata, name, "") if data: info = info + ("\n %s: %s" % \ (name.capitalize(), escape(data))) lines.append("%s=%s" % (name, escape(data))) # The [setup] section contains entries controlling # the installer runtime. lines.append("\n[Setup]") if self.install_script: lines.append("install_script=%s" % self.install_script) lines.append("info=%s" % escape(info)) lines.append("target_compile=%d" % (not self.no_target_compile)) lines.append("target_optimize=%d" % (not self.no_target_optimize)) if self.target_version: lines.append("target_version=%s" % self.target_version) if self.user_access_control: lines.append("user_access_control=%s" % self.user_access_control) title = self.title or self.distribution.get_fullname() lines.append("title=%s" % escape(title)) import time import distutils build_info = "Built %s with distutils-%s" % \ (time.ctime(time.time()), distutils.__version__) lines.append("build_info=%s" % build_info) return "\n".join(lines) def create_exe(self, arcname, fullname, bitmap=None): import struct self.mkpath(self.dist_dir) cfgdata = self.get_inidata() installer_name = self.get_installer_filename(fullname) self.announce("creating %s" % installer_name) if bitmap: with open(bitmap, "rb") as f: bitmapdata = f.read() bitmaplen = len(bitmapdata) else: bitmaplen = 0 with open(installer_name, "wb") as file: file.write(self.get_exe_bytes()) if bitmap: file.write(bitmapdata) # Convert cfgdata from unicode to ascii, mbcs encoded if isinstance(cfgdata, str): cfgdata = cfgdata.encode("mbcs") # Append the pre-install script cfgdata = cfgdata + b"\0" if self.pre_install_script: # We need to normalize newlines, so we open in text mode and # convert back to bytes. "latin-1" simply avoids any possible # failures. with open(self.pre_install_script, "r", encoding="latin-1") as script: script_data = script.read().encode("latin-1") cfgdata = cfgdata + script_data + b"\n\0" else: # empty pre-install script cfgdata = cfgdata + b"\0" file.write(cfgdata) # The 'magic number' 0x1234567B is used to make sure that the # binary layout of 'cfgdata' is what the wininst.exe binary # expects. If the layout changes, increment that number, make # the corresponding changes to the wininst.exe sources, and # recompile them. 
header = struct.pack("<iii", 0x1234567B, # tag len(cfgdata), # length bitmaplen, # number of bytes in bitmap ) file.write(header) with open(arcname, "rb") as f: file.write(f.read()) def get_installer_filename(self, fullname): # Factored out to allow overriding in subclasses if self.target_version: # if we create an installer for a specific python version, # it's better to include this in the name installer_name = os.path.join(self.dist_dir, "%s.%s-py%s.exe" % (fullname, self.plat_name, self.target_version)) else: installer_name = os.path.join(self.dist_dir, "%s.%s.exe" % (fullname, self.plat_name)) return installer_name def get_exe_bytes(self): # If a target-version other than the current version has been # specified, then using the MSVC version from *this* build is no good. # Without actually finding and executing the target version and parsing # its sys.version, we just hard-code our knowledge of old versions. # NOTE: Possible alternative is to allow "--target-version" to # specify a Python executable rather than a simple version string. # We can then execute this program to obtain any info we need, such # as the real sys.version string for the build. cur_version = get_python_version() # If the target version is *later* than us, then we assume they # use what we use # string compares seem wrong, but are what sysconfig.py itself uses if self.target_version and self.target_version < cur_version: if self.target_version < "2.4": bv = '6.0' elif self.target_version == "2.4": bv = '7.1' elif self.target_version == "2.5": bv = '8.0' elif self.target_version <= "3.2": bv = '9.0' elif self.target_version <= "3.4": bv = '10.0' else: bv = '14.0' else: # for current version - use authoritative check. try: from msvcrt import CRT_ASSEMBLY_VERSION except ImportError: # cross-building, so assume the latest version bv = '14.0' else: # as far as we know, CRT is binary compatible based on # the first field, so assume 'x.0' until proven otherwise major = CRT_ASSEMBLY_VERSION.partition('.')[0] bv = major + '.0' # wininst-x.y.exe is in the same directory as this file directory = os.path.dirname(__file__) # we must use a wininst-x.y.exe built with the same C compiler # used for python. XXX What about mingw, borland, and so on? # if plat_name starts with "win" but is not "win32" # we want to strip "win" and leave the rest (e.g. -amd64) # for all other cases, we don't want any suffix if self.plat_name != 'win32' and self.plat_name[:3] == 'win': sfix = self.plat_name[3:] else: sfix = '' filename = os.path.join(directory, "wininst-%s%s.exe" % (bv, sfix)) f = open(filename, "rb") try: return f.read() finally: f.close()
0
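# --- illustrative sketch, not part of the original bdist_wininst.py above ---
# Why: create_exe() appends a small binary trailer after the config data;
# this fragment reproduces just that "<iii" struct layout with dummy data so
# the magic tag and field order are easy to inspect. cfgdata is hypothetical.
import struct

cfgdata = b"[metadata]\nname=example\n\0"   # stand-in config block
bitmaplen = 0                                # no bitmap in this sketch
header = struct.pack("<iii",
                     0x1234567B,    # magic tag expected by wininst.exe
                     len(cfgdata),  # length of the config data
                     bitmaplen)     # number of bytes in the bitmap
assert len(header) == 12  # three little-endian 32-bit ints
print(header.hex())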
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install_headers.py
"""distutils.command.install_headers Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" from distutils.core import Command # XXX force is never used class install_headers(Command): description = "install C/C++ header files" user_options = [('install-dir=', 'd', "directory to install header files to"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def run(self): headers = self.distribution.headers if not headers: return self.mkpath(self.install_dir) for header in headers: (out, _) = self.copy_file(header, self.install_dir) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles
0
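# --- illustrative sketch, not part of the original install_headers.py above ---
# Why: the command is tiny but its wiring is easy to miss; this shows how it
# reads the headers list off a Distribution. The header path is hypothetical
# and run() is never called, so nothing touches the filesystem.
from distutils.dist import Distribution
from distutils.command.install_headers import install_headers

dist = Distribution({'name': 'example-ext',
                     'headers': ['include/example.h']})  # hypothetical header
cmd = install_headers(dist)
print(cmd.get_inputs())    # -> ['include/example.h']
print(cmd.get_outputs())   # -> [] until run() actually copies files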
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install_lib.py
"""distutils.command.install_lib Implements the Distutils 'install_lib' command (install all Python modules).""" import os import importlib.util import sys from distutils.core import Command from distutils.errors import DistutilsOptionError # Extension for Python source files. PYTHON_SOURCE_EXTENSION = ".py" class install_lib(Command): description = "install all Python modules (extensions and pure Python)" # The byte-compilation options are a tad confusing. Here are the # possible scenarios: # 1) no compilation at all (--no-compile --no-optimize) # 2) compile .pyc only (--compile --no-optimize; default) # 3) compile .pyc and "opt-1" .pyc (--compile --optimize) # 4) compile "opt-1" .pyc only (--no-compile --optimize) # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more) # 6) compile "opt-2" .pyc only (--no-compile --optimize-more) # # The UI for this is two options, 'compile' and 'optimize'. # 'compile' is strictly boolean, and only decides whether to # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and # decides both whether to generate .pyc files and what level of # optimization to use. user_options = [ ('install-dir=', 'd', "directory to install to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'compile', 'skip-build'] negative_opt = {'no-compile' : 'compile'} def initialize_options(self): # let the 'install' command dictate our installation directory self.install_dir = None self.build_dir = None self.force = 0 self.compile = None self.optimize = None self.skip_build = None def finalize_options(self): # Get all the information we need to install pure Python modules # from the umbrella 'install' command -- build (source) directory, # install (target) directory, and whether to compile .py files. self.set_undefined_options('install', ('build_lib', 'build_dir'), ('install_lib', 'install_dir'), ('force', 'force'), ('compile', 'compile'), ('optimize', 'optimize'), ('skip_build', 'skip_build'), ) if self.compile is None: self.compile = True if self.optimize is None: self.optimize = False if not isinstance(self.optimize, int): try: self.optimize = int(self.optimize) if self.optimize not in (0, 1, 2): raise AssertionError except (ValueError, AssertionError): raise DistutilsOptionError("optimize must be 0, 1, or 2") def run(self): # Make sure we have built everything we need first self.build() # Install everything: simply dump the entire contents of the build # directory to the installation directory (that's the beauty of # having a build directory!) 
outfiles = self.install() # (Optionally) compile .py to .pyc if outfiles is not None and self.distribution.has_pure_modules(): self.byte_compile(outfiles) # -- Top-level worker functions ------------------------------------ # (called from 'run()') def build(self): if not self.skip_build: if self.distribution.has_pure_modules(): self.run_command('build_py') if self.distribution.has_ext_modules(): self.run_command('build_ext') def install(self): if os.path.isdir(self.build_dir): outfiles = self.copy_tree(self.build_dir, self.install_dir) else: self.warn("'%s' does not exist -- no Python modules to install" % self.build_dir) return return outfiles def byte_compile(self, files): if sys.dont_write_bytecode: self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile # Get the "--root" directory supplied to the "install" command, # and use it as a prefix to strip off the purported filename # encoded in bytecode files. This is far from complete, but it # should at least generate usable bytecode in RPM distributions. install_root = self.get_finalized_command('install').root if self.compile: byte_compile(files, optimize=0, force=self.force, prefix=install_root, dry_run=self.dry_run) if self.optimize > 0: byte_compile(files, optimize=self.optimize, force=self.force, prefix=install_root, verbose=self.verbose, dry_run=self.dry_run) # -- Utility methods ----------------------------------------------- def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir): if not has_any: return [] build_cmd = self.get_finalized_command(build_cmd) build_files = build_cmd.get_outputs() build_dir = getattr(build_cmd, cmd_option) prefix_len = len(build_dir) + len(os.sep) outputs = [] for file in build_files: outputs.append(os.path.join(output_dir, file[prefix_len:])) return outputs def _bytecode_filenames(self, py_filenames): bytecode_files = [] for py_file in py_filenames: # Since build_py handles package data installation, the # list of outputs can contain more than just .py files. # Make sure we only report bytecode for the .py files. ext = os.path.splitext(os.path.normcase(py_file))[1] if ext != PYTHON_SOURCE_EXTENSION: continue if self.compile: bytecode_files.append(importlib.util.cache_from_source( py_file, optimization='')) if self.optimize > 0: bytecode_files.append(importlib.util.cache_from_source( py_file, optimization=self.optimize)) return bytecode_files # -- External interface -------------------------------------------- # (called by outsiders) def get_outputs(self): """Return the list of files that would be installed if this command were actually run. Not affected by the "dry-run" flag or whether modules have actually been built yet. """ pure_outputs = \ self._mutate_outputs(self.distribution.has_pure_modules(), 'build_py', 'build_lib', self.install_dir) if self.compile: bytecode_outputs = self._bytecode_filenames(pure_outputs) else: bytecode_outputs = [] ext_outputs = \ self._mutate_outputs(self.distribution.has_ext_modules(), 'build_ext', 'build_lib', self.install_dir) return pure_outputs + bytecode_outputs + ext_outputs def get_inputs(self): """Get the list of files that are input to this command, ie. the files that get installed as they are named in the build tree. The files in this list correspond one-to-one to the output filenames returned by 'get_outputs()'. 
""" inputs = [] if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') inputs.extend(build_py.get_outputs()) if self.distribution.has_ext_modules(): build_ext = self.get_finalized_command('build_ext') inputs.extend(build_ext.get_outputs()) return inputs
0
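# --- illustrative sketch, not part of the original install_lib.py above ---
# Why: _bytecode_filenames() maps each installed .py file to the bytecode
# paths reported by get_outputs() via importlib; the same helper is shown
# here standalone. The source path is hypothetical; nothing is compiled.
import importlib.util

src = 'pkg/module.py'
print(importlib.util.cache_from_source(src, optimization=''))   # plain .pyc (compile=1)
print(importlib.util.cache_from_source(src, optimization=1))    # "opt-1" .pyc (-O)
print(importlib.util.cache_from_source(src, optimization=2))    # "opt-2" .pyc (-OO)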
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/build_py.py
"""distutils.command.build_py Implements the Distutils 'build_py' command.""" import os import importlib.util import sys import glob from distutils.core import Command from distutils.errors import * from distutils.util import convert_path, Mixin2to3 from distutils import log class build_py (Command): description = "\"build\" pure Python modules (copy to build directory)" user_options = [ ('build-lib=', 'd', "directory to \"build\" (copy) to"), ('compile', 'c', "compile .py to .pyc"), ('no-compile', None, "don't compile .py files [default]"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ] boolean_options = ['compile', 'force'] negative_opt = {'no-compile' : 'compile'} def initialize_options(self): self.build_lib = None self.py_modules = None self.package = None self.package_data = None self.package_dir = None self.compile = 0 self.optimize = 0 self.force = None def finalize_options(self): self.set_undefined_options('build', ('build_lib', 'build_lib'), ('force', 'force')) # Get the distribution options that are aliases for build_py # options -- list of packages and list of modules. self.packages = self.distribution.packages self.py_modules = self.distribution.py_modules self.package_data = self.distribution.package_data self.package_dir = {} if self.distribution.package_dir: for name, path in self.distribution.package_dir.items(): self.package_dir[name] = convert_path(path) self.data_files = self.get_data_files() # Ick, copied straight from install_lib.py (fancy_getopt needs a # type system! Hell, *everything* needs a type system!!!) if not isinstance(self.optimize, int): try: self.optimize = int(self.optimize) assert 0 <= self.optimize <= 2 except (ValueError, AssertionError): raise DistutilsOptionError("optimize must be 0, 1, or 2") def run(self): # XXX copy_file by default preserves atime and mtime. IMHO this is # the right thing to do, but perhaps it should be an option -- in # particular, a site administrator might want installed files to # reflect the time of installation rather than the last # modification time before the installed release. # XXX copy_file by default preserves mode, which appears to be the # wrong thing to do: if a file is read-only in the working # directory, we want it to be installed read/write so that the next # installation of the same module distribution can overwrite it # without problems. (This might be a Unix-specific issue.) Thus # we turn off 'preserve_mode' when copying to the build directory, # since the build directory is supposed to be exactly what the # installation will look like (ie. we preserve mode when # installing). # Two options control which modules will be installed: 'packages' # and 'py_modules'. The former lets us work with whole packages, not # specifying individual modules at all; the latter is for # specifying modules one-at-a-time. 
if self.py_modules: self.build_modules() if self.packages: self.build_packages() self.build_package_data() self.byte_compile(self.get_outputs(include_bytecode=0)) def get_data_files(self): """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" data = [] if not self.packages: return data for package in self.packages: # Locate package source directory src_dir = self.get_package_dir(package) # Compute package build directory build_dir = os.path.join(*([self.build_lib] + package.split('.'))) # Length of path to strip from found files plen = 0 if src_dir: plen = len(src_dir)+1 # Strip directory from globbed filenames filenames = [ file[plen:] for file in self.find_data_files(package, src_dir) ] data.append((package, src_dir, build_dir, filenames)) return data def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" globs = (self.package_data.get('', []) + self.package_data.get(package, [])) files = [] for pattern in globs: # Each pattern has to be converted to a platform-specific path filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) # Files that match more than one pattern are only added once files.extend([fn for fn in filelist if fn not in files and os.path.isfile(fn)]) return files def build_package_data(self): """Copy data files into build directory""" lastdir = None for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False) def get_package_dir(self, package): """Return the directory, relative to the top of the source distribution, where package 'package' should be found (at least according to the 'package_dir' option, if any).""" path = package.split('.') if not self.package_dir: if path: return os.path.join(*path) else: return '' else: tail = [] while path: try: pdir = self.package_dir['.'.join(path)] except KeyError: tail.insert(0, path[-1]) del path[-1] else: tail.insert(0, pdir) return os.path.join(*tail) else: # Oops, got all the way through 'path' without finding a # match in package_dir. If package_dir defines a directory # for the root (nameless) package, then fallback on it; # otherwise, we might as well have not consulted # package_dir at all, as we just use the directory implied # by 'tail' (which should be the same as the original value # of 'path' at this point). pdir = self.package_dir.get('') if pdir is not None: tail.insert(0, pdir) if tail: return os.path.join(*tail) else: return '' def check_package(self, package, package_dir): # Empty dir name means current directory, which we can probably # assume exists. Also, os.path.exists and isdir don't know about # my "empty string means current dir" convention, so we have to # circumvent them. if package_dir != "": if not os.path.exists(package_dir): raise DistutilsFileError( "package directory '%s' does not exist" % package_dir) if not os.path.isdir(package_dir): raise DistutilsFileError( "supposed package directory '%s' exists, " "but is not a directory" % package_dir) # Require __init__.py for all but the "root package" if package: init_py = os.path.join(package_dir, "__init__.py") if os.path.isfile(init_py): return init_py else: log.warn(("package init file '%s' not found " + "(or not a regular file)"), init_py) # Either not in a package at all (__init__.py not expected), or # __init__.py doesn't exist -- so don't return the filename. 
return None def check_module(self, module, module_file): if not os.path.isfile(module_file): log.warn("file %s (for module %s) not found", module_file, module) return False else: return True def find_package_modules(self, package, package_dir): self.check_package(package, package_dir) module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) modules = [] setup_script = os.path.abspath(self.distribution.script_name) for f in module_files: abs_f = os.path.abspath(f) if abs_f != setup_script: module = os.path.splitext(os.path.basename(f))[0] modules.append((package, module, f)) else: self.debug_print("excluding %s" % setup_script) return modules def find_modules(self): """Finds individually-specified Python modules, ie. those listed by module name in 'self.py_modules'. Returns a list of tuples (package, module_base, filename): 'package' is a tuple of the path through package-space to the module; 'module_base' is the bare (no packages, no dots) module name, and 'filename' is the path to the ".py" file (relative to the distribution root) that implements the module. """ # Map package names to tuples of useful info about the package: # (package_dir, checked) # package_dir - the directory where we'll find source files for # this package # checked - true if we have checked that the package directory # is valid (exists, contains __init__.py, ... ?) packages = {} # List of (package, module, filename) tuples to return modules = [] # We treat modules-in-packages almost the same as toplevel modules, # just the "package" for a toplevel is empty (either an empty # string or empty list, depending on context). Differences: # - don't check for __init__.py in directory for empty package for module in self.py_modules: path = module.split('.') package = '.'.join(path[0:-1]) module_base = path[-1] try: (package_dir, checked) = packages[package] except KeyError: package_dir = self.get_package_dir(package) checked = 0 if not checked: init_py = self.check_package(package, package_dir) packages[package] = (package_dir, 1) if init_py: modules.append((package, "__init__", init_py)) # XXX perhaps we should also check for just .pyc files # (so greedy closed-source bastards can distribute Python # modules too) module_file = os.path.join(package_dir, module_base + ".py") if not self.check_module(module, module_file): continue modules.append((package, module_base, module_file)) return modules def find_all_modules(self): """Compute the list of all modules that will be built, whether they are specified one-module-at-a-time ('self.py_modules') or by whole packages ('self.packages'). 
Return a list of tuples (package, module, module_file), just like 'find_modules()' and 'find_package_modules()' do.""" modules = [] if self.py_modules: modules.extend(self.find_modules()) if self.packages: for package in self.packages: package_dir = self.get_package_dir(package) m = self.find_package_modules(package, package_dir) modules.extend(m) return modules def get_source_files(self): return [module[-1] for module in self.find_all_modules()] def get_module_outfile(self, build_dir, package, module): outfile_path = [build_dir] + list(package) + [module + ".py"] return os.path.join(*outfile_path) def get_outputs(self, include_bytecode=1): modules = self.find_all_modules() outputs = [] for (package, module, module_file) in modules: package = package.split('.') filename = self.get_module_outfile(self.build_lib, package, module) outputs.append(filename) if include_bytecode: if self.compile: outputs.append(importlib.util.cache_from_source( filename, optimization='')) if self.optimize > 0: outputs.append(importlib.util.cache_from_source( filename, optimization=self.optimize)) outputs += [ os.path.join(build_dir, filename) for package, src_dir, build_dir, filenames in self.data_files for filename in filenames ] return outputs def build_module(self, module, module_file, package): if isinstance(package, str): package = package.split('.') elif not isinstance(package, (list, tuple)): raise TypeError( "'package' must be a string (dot-separated), list, or tuple") # Now put the module source file into the "build" area -- this is # easy, we just copy it somewhere under self.build_lib (the build # directory for Python source). outfile = self.get_module_outfile(self.build_lib, package, module) dir = os.path.dirname(outfile) self.mkpath(dir) return self.copy_file(module_file, outfile, preserve_mode=0) def build_modules(self): modules = self.find_modules() for (package, module, module_file) in modules: # Now "build" the module -- ie. copy the source file to # self.build_lib (the build directory for Python source). # (Actually, it gets copied to the directory for this package # under self.build_lib.) self.build_module(module, module_file, package) def build_packages(self): for package in self.packages: # Get list of (package, module, module_file) tuples based on # scanning the package directory. 'package' is only included # in the tuple so that 'find_modules()' and # 'find_package_tuples()' have a consistent interface; it's # ignored here (apart from a sanity check). Also, 'module' is # the *unqualified* module name (ie. no dots, no package -- we # already know its package!), and 'module_file' is the path to # the .py file, relative to the current directory # (ie. including 'package_dir'). package_dir = self.get_package_dir(package) modules = self.find_package_modules(package, package_dir) # Now loop over the modules we found, "building" each one (just # copy it to self.build_lib). for (package_, module, module_file) in modules: assert package == package_ self.build_module(module, module_file, package) def byte_compile(self, files): if sys.dont_write_bytecode: self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile prefix = self.build_lib if prefix[-1] != os.sep: prefix = prefix + os.sep # XXX this code is essentially the same as the 'byte_compile() # method of the "install_lib" command, except for the determination # of the 'prefix' string. Hmmm. 
if self.compile: byte_compile(files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run) if self.optimize > 0: byte_compile(files, optimize=self.optimize, force=self.force, prefix=prefix, dry_run=self.dry_run) class build_py_2to3(build_py, Mixin2to3): def run(self): self.updated_files = [] # Base class code if self.py_modules: self.build_modules() if self.packages: self.build_packages() self.build_package_data() # 2to3 self.run_2to3(self.updated_files) # Remaining base class code self.byte_compile(self.get_outputs(include_bytecode=0)) def build_module(self, module, module_file, package): res = build_py.build_module(self, module, module_file, package) if res[1]: # file was copied self.updated_files.append(res[0]) return res
0
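# --- illustrative sketch, not part of the original build_py.py above ---
# Why: get_package_dir() walks the dotted package name against the
# package_dir mapping; this replays that resolution for a hypothetical
# src-layout project. finalize_options() is bypassed, so package_dir is set
# by hand the way that method normally would.
from distutils.dist import Distribution
from distutils.command.build_py import build_py

dist = Distribution({'name': 'example',
                     'packages': ['pkg', 'pkg.sub'],
                     'package_dir': {'': 'src'}})
cmd = build_py(dist)
cmd.package_dir = {'': 'src'}            # normally filled in by finalize_options()
print(cmd.get_package_dir('pkg.sub'))    # -> src/pkg/sub (joined with os.sep)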
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/bdist_dumb.py
"""distutils.command.bdist_dumb Implements the Distutils 'bdist_dumb' command (create a "dumb" built distribution -- i.e., just an archive to be unpacked under $prefix or $exec_prefix).""" import os from distutils.core import Command from distutils.util import get_platform from distutils.dir_util import remove_tree, ensure_relative from distutils.errors import * from distutils.sysconfig import get_python_version from distutils import log class bdist_dumb(Command): description = "create a \"dumb\" built distribution" user_options = [('bdist-dir=', 'd', "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('format=', 'f', "archive format to create (tar, gztar, bztar, xztar, " "ztar, zip)"), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('relative', None, "build the archive using relative paths " "(default: false)"), ('owner=', 'u', "Owner name used when creating a tar file" " [default: current user]"), ('group=', 'g', "Group name used when creating a tar file" " [default: current group]"), ] boolean_options = ['keep-temp', 'skip-build', 'relative'] default_format = { 'posix': 'gztar', 'nt': 'zip' } def initialize_options(self): self.bdist_dir = None self.plat_name = None self.format = None self.keep_temp = 0 self.dist_dir = None self.skip_build = None self.relative = 0 self.owner = None self.group = None def finalize_options(self): if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'dumb') if self.format is None: try: self.format = self.default_format[os.name] except KeyError: raise DistutilsPlatformError( "don't know how to create dumb built distributions " "on platform %s" % os.name) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name'), ('skip_build', 'skip_build')) def run(self): if not self.skip_build: self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.root = self.bdist_dir install.skip_build = self.skip_build install.warn_dir = 0 log.info("installing to %s", self.bdist_dir) self.run_command('install') # And make an archive relative to the root of the # pseudo-installation tree. archive_basename = "%s.%s" % (self.distribution.get_fullname(), self.plat_name) pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) if not self.relative: archive_root = self.bdist_dir else: if (self.distribution.has_ext_modules() and (install.install_base != install.install_platbase)): raise DistutilsPlatformError( "can't make a dumb built distribution where " "base and platbase are different (%s, %s)" % (repr(install.install_base), repr(install.install_platbase))) else: archive_root = os.path.join(self.bdist_dir, ensure_relative(install.install_base)) # Make the archive filename = self.make_archive(pseudoinstall_root, self.format, root_dir=archive_root, owner=self.owner, group=self.group) if self.distribution.has_ext_modules(): pyversion = get_python_version() else: pyversion = 'any' self.distribution.dist_files.append(('bdist_dumb', pyversion, filename)) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run)
0
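# --- illustrative sketch, not part of the original bdist_dumb.py above ---
# Why: when --format is omitted, finalize_options() picks an archive format
# from the os.name table; this replays that lookup standalone.
import os

default_format = {'posix': 'gztar', 'nt': 'zip'}
fmt = default_format.get(os.name)
if fmt is None:
    raise SystemExit("no dumb-distribution format known for %r" % os.name)
print("bdist_dumb would build a %r archive on this platform" % fmt)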
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/__init__.py
"""distutils.command Package containing implementation of all the standard Distutils commands.""" __all__ = ['build', 'build_py', 'build_ext', 'build_clib', 'build_scripts', 'clean', 'install', 'install_lib', 'install_headers', 'install_scripts', 'install_data', 'sdist', 'register', 'bdist', 'bdist_dumb', 'bdist_rpm', 'bdist_wininst', 'check', 'upload', # These two are reserved for future use: #'bdist_sdux', #'bdist_pkgtool', # Note: # bdist_packager is not included because it only provides # an abstract base class ]
0
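# --- illustrative sketch, not part of the original __init__.py above ---
# Why: __all__ doubles as a registry -- each name is a module in
# distutils.command defining a class of the same name, which is how the
# Distribution machinery locates commands. This loop just demonstrates the
# convention for a couple of entries.
import importlib

for name in ('build_py', 'sdist'):
    mod = importlib.import_module('distutils.command.' + name)
    print(name, '->', getattr(mod, name))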
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/sdist.py
"""distutils.command.sdist Implements the Distutils 'sdist' command (create a source distribution).""" import os import sys from glob import glob from warnings import warn from distutils.core import Command from distutils import dir_util from distutils import file_util from distutils import archive_util from distutils.text_file import TextFile from distutils.filelist import FileList from distutils import log from distutils.util import convert_path from distutils.errors import DistutilsTemplateError, DistutilsOptionError def show_formats(): """Print all possible values for the 'formats' option (used by the "--help-formats" command-line option). """ from distutils.fancy_getopt import FancyGetopt from distutils.archive_util import ARCHIVE_FORMATS formats = [] for format in ARCHIVE_FORMATS.keys(): formats.append(("formats=" + format, None, ARCHIVE_FORMATS[format][2])) formats.sort() FancyGetopt(formats).print_help( "List of available source distribution formats:") class sdist(Command): description = "create a source distribution (tarball, zip file, etc.)" def checking_metadata(self): """Callable used for the check sub-command. Placed here so user_options can view it""" return self.metadata_check user_options = [ ('template=', 't', "name of manifest template file [default: MANIFEST.in]"), ('manifest=', 'm', "name of manifest file [default: MANIFEST]"), ('use-defaults', None, "include the default file set in the manifest " "[default; disable with --no-defaults]"), ('no-defaults', None, "don't include the default file set"), ('prune', None, "specifically exclude files/directories that should not be " "distributed (build tree, RCS/CVS dirs, etc.) " "[default; disable with --no-prune]"), ('no-prune', None, "don't automatically exclude anything"), ('manifest-only', 'o', "just regenerate the manifest and then stop " "(implies --force-manifest)"), ('force-manifest', 'f', "forcibly regenerate the manifest and carry on as usual. " "Deprecated: now the manifest is always regenerated."), ('formats=', None, "formats for source distribution (comma-separated list)"), ('keep-temp', 'k', "keep the distribution tree around after creating " + "archive file(s)"), ('dist-dir=', 'd', "directory to put the source distribution archive(s) in " "[default: dist]"), ('metadata-check', None, "Ensure that all required elements of meta-data " "are supplied. Warn if any missing. [default]"), ('owner=', 'u', "Owner name used when creating a tar file [default: current user]"), ('group=', 'g', "Group name used when creating a tar file [default: current group]"), ] boolean_options = ['use-defaults', 'prune', 'manifest-only', 'force-manifest', 'keep-temp', 'metadata-check'] help_options = [ ('help-formats', None, "list available distribution formats", show_formats), ] negative_opt = {'no-defaults': 'use-defaults', 'no-prune': 'prune' } sub_commands = [('check', checking_metadata)] READMES = ('README', 'README.txt', 'README.rst') def initialize_options(self): # 'template' and 'manifest' are, respectively, the names of # the manifest template and manifest file. 
self.template = None self.manifest = None # 'use_defaults': if true, we will include the default file set # in the manifest self.use_defaults = 1 self.prune = 1 self.manifest_only = 0 self.force_manifest = 0 self.formats = ['gztar'] self.keep_temp = 0 self.dist_dir = None self.archive_files = None self.metadata_check = 1 self.owner = None self.group = None def finalize_options(self): if self.manifest is None: self.manifest = "MANIFEST" if self.template is None: self.template = "MANIFEST.in" self.ensure_string_list('formats') bad_format = archive_util.check_archive_formats(self.formats) if bad_format: raise DistutilsOptionError( "unknown archive format '%s'" % bad_format) if self.dist_dir is None: self.dist_dir = "dist" def run(self): # 'filelist' contains the list of files that will make up the # manifest self.filelist = FileList() # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) # Do whatever it takes to get the list of files to process # (process the manifest template, read an existing manifest, # whatever). File list is accumulated in 'self.filelist'. self.get_file_list() # If user just wanted us to regenerate the manifest, stop now. if self.manifest_only: return # Otherwise, go ahead and create the source distribution tarball, # or zipfile, or whatever. self.make_distribution() def check_metadata(self): """Deprecated API.""" warn("distutils.command.sdist.check_metadata is deprecated, \ use the check command instead", PendingDeprecationWarning) check = self.distribution.get_command_obj('check') check.ensure_finalized() check.run() def get_file_list(self): """Figure out the list of files to include in the source distribution, and put it in 'self.filelist'. This might involve reading the manifest template (and writing the manifest), or just reading the manifest, or just using the default file set -- it all depends on the user's options. """ # new behavior when using a template: # the file list is recalculated every time because # even if MANIFEST.in or setup.py are not changed # the user might have added some files in the tree that # need to be included. # # This makes --force the default and only behavior with templates. template_exists = os.path.isfile(self.template) if not template_exists and self._manifest_is_not_generated(): self.read_manifest() self.filelist.sort() self.filelist.remove_duplicates() return if not template_exists: self.warn(("manifest template '%s' does not exist " + "(using default file list)") % self.template) self.filelist.findall() if self.use_defaults: self.add_defaults() if template_exists: self.read_template() if self.prune: self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() def add_defaults(self): """Add all the default files to self.filelist: - README or README.txt - setup.py - test/test*.py - all pure Python modules mentioned in setup script - all files pointed by package_data (build_py) - all files defined in data_files. - all files defined as scripts. - all C sources listed as part of extensions or C libraries in the setup script (doesn't catch C headers!) Warns if (README or README.txt) or setup.py are missing; everything else is optional. 
""" self._add_defaults_standards() self._add_defaults_optional() self._add_defaults_python() self._add_defaults_data_files() self._add_defaults_ext() self._add_defaults_c_libs() self._add_defaults_scripts() @staticmethod def _cs_path_exists(fspath): """ Case-sensitive path existence check >>> sdist._cs_path_exists(__file__) True >>> sdist._cs_path_exists(__file__.upper()) False """ if not os.path.exists(fspath): return False # make absolute so we always have a directory abspath = os.path.abspath(fspath) directory, filename = os.path.split(abspath) return filename in os.listdir(directory) def _add_defaults_standards(self): standards = [self.READMES, self.distribution.script_name] for fn in standards: if isinstance(fn, tuple): alts = fn got_it = False for fn in alts: if self._cs_path_exists(fn): got_it = True self.filelist.append(fn) break if not got_it: self.warn("standard file not found: should have one of " + ', '.join(alts)) else: if self._cs_path_exists(fn): self.filelist.append(fn) else: self.warn("standard file '%s' not found" % fn) def _add_defaults_optional(self): optional = ['test/test*.py', 'setup.cfg'] for pattern in optional: files = filter(os.path.isfile, glob(pattern)) self.filelist.extend(files) def _add_defaults_python(self): # build_py is used to get: # - python modules # - files defined in package_data build_py = self.get_finalized_command('build_py') # getting python files if self.distribution.has_pure_modules(): self.filelist.extend(build_py.get_source_files()) # getting package_data files # (computed in build_py.data_files by build_py.finalize_options) for pkg, src_dir, build_dir, filenames in build_py.data_files: for filename in filenames: self.filelist.append(os.path.join(src_dir, filename)) def _add_defaults_data_files(self): # getting distribution.data_files if self.distribution.has_data_files(): for item in self.distribution.data_files: if isinstance(item, str): # plain file item = convert_path(item) if os.path.isfile(item): self.filelist.append(item) else: # a (dirname, filenames) tuple dirname, filenames = item for f in filenames: f = convert_path(f) if os.path.isfile(f): self.filelist.append(f) def _add_defaults_ext(self): if self.distribution.has_ext_modules(): build_ext = self.get_finalized_command('build_ext') self.filelist.extend(build_ext.get_source_files()) def _add_defaults_c_libs(self): if self.distribution.has_c_libraries(): build_clib = self.get_finalized_command('build_clib') self.filelist.extend(build_clib.get_source_files()) def _add_defaults_scripts(self): if self.distribution.has_scripts(): build_scripts = self.get_finalized_command('build_scripts') self.filelist.extend(build_scripts.get_source_files()) def read_template(self): """Read and parse manifest template file named by self.template. (usually "MANIFEST.in") The parsing and processing is done by 'self.filelist', which updates itself accordingly. 
""" log.info("reading manifest template '%s'", self.template) template = TextFile(self.template, strip_comments=1, skip_blanks=1, join_lines=1, lstrip_ws=1, rstrip_ws=1, collapse_join=1) try: while True: line = template.readline() if line is None: # end of file break try: self.filelist.process_template_line(line) # the call above can raise a DistutilsTemplateError for # malformed lines, or a ValueError from the lower-level # convert_path function except (DistutilsTemplateError, ValueError) as msg: self.warn("%s, line %d: %s" % (template.filename, template.current_line, msg)) finally: template.close() def prune_file_list(self): """Prune off branches that might slip into the file list as created by 'read_template()', but really don't belong there: * the build tree (typically "build") * the release tree itself (only an issue if we ran "sdist" previously with --keep-temp, or it aborted) * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories """ build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() self.filelist.exclude_pattern(None, prefix=build.build_base) self.filelist.exclude_pattern(None, prefix=base_dir) if sys.platform == 'win32': seps = r'/|\\' else: seps = '/' vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs'] vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps) self.filelist.exclude_pattern(vcs_ptrn, is_regex=1) def write_manifest(self): """Write the file list in 'self.filelist' (presumably as filled in by 'add_defaults()' and 'read_template()') to the manifest file named by 'self.manifest'. """ if self._manifest_is_not_generated(): log.info("not writing to manually maintained " "manifest file '%s'" % self.manifest) return content = self.filelist.files[:] content.insert(0, '# file GENERATED by distutils, do NOT edit') self.execute(file_util.write_file, (self.manifest, content), "writing manifest file '%s'" % self.manifest) def _manifest_is_not_generated(self): # check for special comment used in 3.1.3 and higher if not os.path.isfile(self.manifest): return False fp = open(self.manifest) try: first_line = fp.readline() finally: fp.close() return first_line != '# file GENERATED by distutils, do NOT edit\n' def read_manifest(self): """Read the manifest file (named by 'self.manifest') and use it to fill in 'self.filelist', the list of files to include in the source distribution. """ log.info("reading manifest file '%s'", self.manifest) with open(self.manifest) as manifest: for line in manifest: # ignore comments and blank lines line = line.strip() if line.startswith('#') or not line: continue self.filelist.append(line) def make_release_tree(self, base_dir, files): """Create the directory tree that will become the source distribution archive. All directories implied by the filenames in 'files' are created under 'base_dir', and then we hard link or copy (if hard linking is unavailable) those files into place. Essentially, this duplicates the developer's source tree, but in a directory named after the distribution, containing only the files to be distributed. """ # Create all the directories under 'base_dir' necessary to # put 'files' there; the 'mkpath()' is just so we don't die # if the manifest happens to be empty. 
self.mkpath(base_dir) dir_util.create_tree(base_dir, files, dry_run=self.dry_run) # And walk over the list of files, either making a hard link (if # os.link exists) to each one that doesn't already exist in its # corresponding location under 'base_dir', or copying each file # that's out-of-date in 'base_dir'. (Usually, all files will be # out-of-date, because by default we blow away 'base_dir' when # we're done making the distribution archives.) if hasattr(os, 'link'): # can make hard links on this system link = 'hard' msg = "making hard links in %s..." % base_dir else: # nope, have to copy link = None msg = "copying files to %s..." % base_dir if not files: log.warn("no files to distribute -- empty manifest?") else: log.info(msg) for file in files: if not os.path.isfile(file): log.warn("'%s' not a regular file -- skipping", file) else: dest = os.path.join(base_dir, file) self.copy_file(file, dest, link=link) self.distribution.metadata.write_pkg_info(base_dir) def make_distribution(self): """Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'. """ # Don't warn about missing meta-data here -- should be (and is!) # done elsewhere. base_dir = self.distribution.get_fullname() base_name = os.path.join(self.dist_dir, base_dir) self.make_release_tree(base_dir, self.filelist.files) archive_files = [] # remember names of files we create # tar archive must be created last to avoid overwrite and remove if 'tar' in self.formats: self.formats.append(self.formats.pop(self.formats.index('tar'))) for fmt in self.formats: file = self.make_archive(base_name, fmt, base_dir=base_dir, owner=self.owner, group=self.group) archive_files.append(file) self.distribution.dist_files.append(('sdist', '', file)) self.archive_files = archive_files if not self.keep_temp: dir_util.remove_tree(base_dir, dry_run=self.dry_run) def get_archive_files(self): """Return the list of archive files created when the command was run, or None if the command hasn't run yet. """ return self.archive_files
0
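# --- illustrative sketch, not part of the original sdist.py above ---
# Why: prune_file_list() builds a regex to drop version-control directories
# from the manifest; the same pattern is reconstructed here so it can be
# probed against sample paths. The paths below are made up.
import re
import sys

seps = r'/|\\' if sys.platform == 'win32' else '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)

for path in ('pkg/mod.py', '.git/config', 'src/.svn/entries'):
    hit = re.search(vcs_ptrn, path)
    print(path, '->', 'pruned' if hit else 'kept')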
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/bdist.py
"""distutils.command.bdist Implements the Distutils 'bdist' command (create a built [binary] distribution).""" import os from distutils.core import Command from distutils.errors import * from distutils.util import get_platform def show_formats(): """Print list of available formats (arguments to "--format" option). """ from distutils.fancy_getopt import FancyGetopt formats = [] for format in bdist.format_commands: formats.append(("formats=" + format, None, bdist.format_command[format][1])) pretty_printer = FancyGetopt(formats) pretty_printer.print_help("List of available distribution formats:") class bdist(Command): description = "create a built (binary) distribution" user_options = [('bdist-base=', 'b', "temporary directory for creating built distributions"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('formats=', None, "formats for distribution (comma-separated list)"), ('dist-dir=', 'd', "directory to put final built distributions in " "[default: dist]"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('owner=', 'u', "Owner name used when creating a tar file" " [default: current user]"), ('group=', 'g', "Group name used when creating a tar file" " [default: current group]"), ] boolean_options = ['skip-build'] help_options = [ ('help-formats', None, "lists available distribution formats", show_formats), ] # The following commands do not take a format option from bdist no_format_option = ('bdist_rpm',) # This won't do in reality: will need to distinguish RPM-ish Linux, # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS. default_format = {'posix': 'gztar', 'nt': 'zip'} # Establish the preferred order (for the --help-formats option). format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar', 'wininst', 'zip', 'msi'] # And the real information. format_command = {'rpm': ('bdist_rpm', "RPM distribution"), 'gztar': ('bdist_dumb', "gzip'ed tar file"), 'bztar': ('bdist_dumb', "bzip2'ed tar file"), 'xztar': ('bdist_dumb', "xz'ed tar file"), 'ztar': ('bdist_dumb', "compressed tar file"), 'tar': ('bdist_dumb', "tar file"), 'wininst': ('bdist_wininst', "Windows executable installer"), 'zip': ('bdist_dumb', "ZIP file"), 'msi': ('bdist_msi', "Microsoft Installer") } def initialize_options(self): self.bdist_base = None self.plat_name = None self.formats = None self.dist_dir = None self.skip_build = 0 self.group = None self.owner = None def finalize_options(self): # have to finalize 'plat_name' before 'bdist_base' if self.plat_name is None: if self.skip_build: self.plat_name = get_platform() else: self.plat_name = self.get_finalized_command('build').plat_name # 'bdist_base' -- parent of per-built-distribution-format # temporary directories (eg. we'll probably have # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.) if self.bdist_base is None: build_base = self.get_finalized_command('build').build_base self.bdist_base = os.path.join(build_base, 'bdist.' + self.plat_name) self.ensure_string_list('formats') if self.formats is None: try: self.formats = [self.default_format[os.name]] except KeyError: raise DistutilsPlatformError( "don't know how to create built distributions " "on platform %s" % os.name) if self.dist_dir is None: self.dist_dir = "dist" def run(self): # Figure out which sub-commands we need to run. 
commands = [] for format in self.formats: try: commands.append(self.format_command[format][0]) except KeyError: raise DistutilsOptionError("invalid format '%s'" % format) # Reinitialize and run each command. for i in range(len(self.formats)): cmd_name = commands[i] sub_cmd = self.reinitialize_command(cmd_name) if cmd_name not in self.no_format_option: sub_cmd.format = self.formats[i] # passing the owner and group names for tar archiving if cmd_name == 'bdist_dumb': sub_cmd.owner = self.owner sub_cmd.group = self.group # If we're going to need to run this command again, tell it to # keep its temporary files around so subsequent runs go faster. if cmd_name in commands[i+1:]: sub_cmd.keep_temp = 1 self.run_command(cmd_name)
0
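For reference, a minimal standalone sketch (not part of the file above) of the format-to-command resolution that `bdist.run` performs. The `format_command` entries are copied from the class above; the sample input list is invented for illustration.

# Sketch: map requested --formats values to the sub-commands that build them,
# mirroring the lookup in bdist.run above.
format_command = {
    'rpm':   ('bdist_rpm',  "RPM distribution"),
    'gztar': ('bdist_dumb', "gzip'ed tar file"),
    'zip':   ('bdist_dumb', "ZIP file"),
}

def resolve(formats):
    """Return the command name that builds each requested format."""
    commands = []
    for fmt in formats:
        try:
            commands.append(format_command[fmt][0])
        except KeyError:
            raise ValueError("invalid format %r" % fmt)
    return commands

print(resolve(['gztar', 'zip']))   # ['bdist_dumb', 'bdist_dumb']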
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/build_scripts.py
"""distutils.command.build_scripts Implements the Distutils 'build_scripts' command.""" import os, re from stat import ST_MODE from distutils import sysconfig from distutils.core import Command from distutils.dep_util import newer from distutils.util import convert_path, Mixin2to3 from distutils import log import tokenize # check if Python is called on the first line with this expression first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$') class build_scripts(Command): description = "\"build\" scripts (copy and fixup #! line)" user_options = [ ('build-dir=', 'd', "directory to \"build\" (copy) to"), ('force', 'f', "forcibly build everything (ignore file timestamps"), ('executable=', 'e', "specify final destination interpreter path"), ] boolean_options = ['force'] def initialize_options(self): self.build_dir = None self.scripts = None self.force = None self.executable = None self.outfiles = None def finalize_options(self): self.set_undefined_options('build', ('build_scripts', 'build_dir'), ('force', 'force'), ('executable', 'executable')) self.scripts = self.distribution.scripts def get_source_files(self): return self.scripts def run(self): if not self.scripts: return self.copy_scripts() def copy_scripts(self): r"""Copy each script listed in 'self.scripts'; if it's marked as a Python script in the Unix way (first line matches 'first_line_re', ie. starts with "\#!" and contains "python"), then adjust the first line to refer to the current Python interpreter as we copy. """ self.mkpath(self.build_dir) outfiles = [] updated_files = [] for script in self.scripts: adjust = False script = convert_path(script) outfile = os.path.join(self.build_dir, os.path.basename(script)) outfiles.append(outfile) if not self.force and not newer(script, outfile): log.debug("not copying %s (up-to-date)", script) continue # Always open the file, but ignore failures in dry-run mode -- # that way, we'll get accurate feedback if we can read the # script. try: f = open(script, "rb") except OSError: if not self.dry_run: raise f = None else: encoding, lines = tokenize.detect_encoding(f.readline) f.seek(0) first_line = f.readline() if not first_line: self.warn("%s is an empty file (skipping)" % script) continue match = first_line_re.match(first_line) if match: adjust = True post_interp = match.group(1) or b'' if adjust: log.info("copying and adjusting %s -> %s", script, self.build_dir) updated_files.append(outfile) if not self.dry_run: if not sysconfig.python_build: executable = self.executable else: executable = os.path.join( sysconfig.get_config_var("BINDIR"), "python%s%s" % (sysconfig.get_config_var("VERSION"), sysconfig.get_config_var("EXE"))) executable = os.fsencode(executable) shebang = b"#!" + executable + post_interp + b"\n" # Python parser starts to read a script using UTF-8 until # it gets a #coding:xxx cookie. The shebang has to be the # first line of a file, the #coding:xxx cookie cannot be # written before. So the shebang has to be decodable from # UTF-8. try: shebang.decode('utf-8') except UnicodeDecodeError: raise ValueError( "The shebang ({!r}) is not decodable " "from utf-8".format(shebang)) # If the script is encoded to a custom encoding (use a # #coding:xxx cookie), the shebang has to be decodable from # the script encoding too. 
try: shebang.decode(encoding) except UnicodeDecodeError: raise ValueError( "The shebang ({!r}) is not decodable " "from the script encoding ({})" .format(shebang, encoding)) with open(outfile, "wb") as outf: outf.write(shebang) outf.writelines(f.readlines()) if f: f.close() else: if f: f.close() updated_files.append(outfile) self.copy_file(script, outfile) if os.name == 'posix': for file in outfiles: if self.dry_run: log.info("changing mode of %s", file) else: oldmode = os.stat(file)[ST_MODE] & 0o7777 newmode = (oldmode | 0o555) & 0o7777 if newmode != oldmode: log.info("changing mode of %s from %o to %o", file, oldmode, newmode) os.chmod(file, newmode) # XXX should we modify self.outfiles? return outfiles, updated_files class build_scripts_2to3(build_scripts, Mixin2to3): def copy_scripts(self): outfiles, updated_files = build_scripts.copy_scripts(self) if not self.dry_run: self.run_2to3(updated_files) return outfiles, updated_files
0
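A minimal standalone sketch of the shebang rewrite that `copy_scripts` performs above. `first_line_re` and the `shebang` assembly are copied from the file; the sample script line and target interpreter path are hypothetical.

import re

# first_line_re is the same pattern build_scripts uses to detect a
# "python" shebang; group(1) captures any interpreter flags.
first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')

first_line = b"#!/usr/bin/python3 -u\n"          # invented sample script line
match = first_line_re.match(first_line)
if match:
    post_interp = match.group(1) or b''          # b' -u' -- flags preserved
    executable = b"/opt/python/bin/python3.9"    # hypothetical destination interpreter
    shebang = b"#!" + executable + post_interp + b"\n"
    print(shebang)   # b'#!/opt/python/bin/python3.9 -u\n'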
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/bdist_rpm.py
"""distutils.command.bdist_rpm Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.file_util import write_file from distutils.errors import * from distutils.sysconfig import get_python_version from distutils import log class bdist_rpm(Command): description = "create an RPM distribution" user_options = [ ('bdist-base=', None, "base directory for creating built distributions"), ('rpm-base=', None, "base directory for creating RPMs (defaults to \"rpm\" under " "--bdist-base; must be specified for RPM 2)"), ('dist-dir=', 'd', "directory to put final RPM files in " "(and .spec files if --spec-only)"), ('python=', None, "path to Python interpreter to hard-code in the .spec file " "(default: \"python\")"), ('fix-python', None, "hard-code the exact path to the current Python interpreter in " "the .spec file"), ('spec-only', None, "only regenerate spec file"), ('source-only', None, "only generate source RPM"), ('binary-only', None, "only generate binary RPM"), ('use-bzip2', None, "use bzip2 instead of gzip to create source distribution"), # More meta-data: too RPM-specific to put in the setup script, # but needs to go in the .spec file -- so we make these options # to "bdist_rpm". The idea is that packagers would put this # info in setup.cfg, although they are of course free to # supply it on the command line. ('distribution-name=', None, "name of the (Linux) distribution to which this " "RPM applies (*not* the name of the module distribution!)"), ('group=', None, "package classification [default: \"Development/Libraries\"]"), ('release=', None, "RPM release number"), ('serial=', None, "RPM serial number"), ('vendor=', None, "RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") " "[default: maintainer or author from setup script]"), ('packager=', None, "RPM packager (eg. 
\"Jane Doe <jane@example.net>\") " "[default: vendor]"), ('doc-files=', None, "list of documentation files (space or comma-separated)"), ('changelog=', None, "RPM changelog"), ('icon=', None, "name of icon file"), ('provides=', None, "capabilities provided by this package"), ('requires=', None, "capabilities required by this package"), ('conflicts=', None, "capabilities which conflict with this package"), ('build-requires=', None, "capabilities required to build this package"), ('obsoletes=', None, "capabilities made obsolete by this package"), ('no-autoreq', None, "do not automatically calculate dependencies"), # Actions to take when building RPM ('keep-temp', 'k', "don't clean up RPM build directory"), ('no-keep-temp', None, "clean up RPM build directory [default]"), ('use-rpm-opt-flags', None, "compile with RPM_OPT_FLAGS when building from source RPM"), ('no-rpm-opt-flags', None, "do not pass any RPM CFLAGS to compiler"), ('rpm3-mode', None, "RPM 3 compatibility mode (default)"), ('rpm2-mode', None, "RPM 2 compatibility mode"), # Add the hooks necessary for specifying custom scripts ('prep-script=', None, "Specify a script for the PREP phase of RPM building"), ('build-script=', None, "Specify a script for the BUILD phase of RPM building"), ('pre-install=', None, "Specify a script for the pre-INSTALL phase of RPM building"), ('install-script=', None, "Specify a script for the INSTALL phase of RPM building"), ('post-install=', None, "Specify a script for the post-INSTALL phase of RPM building"), ('pre-uninstall=', None, "Specify a script for the pre-UNINSTALL phase of RPM building"), ('post-uninstall=', None, "Specify a script for the post-UNINSTALL phase of RPM building"), ('clean-script=', None, "Specify a script for the CLEAN phase of RPM building"), ('verify-script=', None, "Specify a script for the VERIFY phase of the RPM build"), # Allow a packager to explicitly force an architecture ('force-arch=', None, "Force an architecture onto the RPM build process"), ('quiet', 'q', "Run the INSTALL phase of RPM building in quiet mode"), ] boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode', 'no-autoreq', 'quiet'] negative_opt = {'no-keep-temp': 'keep-temp', 'no-rpm-opt-flags': 'use-rpm-opt-flags', 'rpm2-mode': 'rpm3-mode'} def initialize_options(self): self.bdist_base = None self.rpm_base = None self.dist_dir = None self.python = None self.fix_python = None self.spec_only = None self.binary_only = None self.source_only = None self.use_bzip2 = None self.distribution_name = None self.group = None self.release = None self.serial = None self.vendor = None self.packager = None self.doc_files = None self.changelog = None self.icon = None self.prep_script = None self.build_script = None self.install_script = None self.clean_script = None self.verify_script = None self.pre_install = None self.post_install = None self.pre_uninstall = None self.post_uninstall = None self.prep = None self.provides = None self.requires = None self.conflicts = None self.build_requires = None self.obsoletes = None self.keep_temp = 0 self.use_rpm_opt_flags = 1 self.rpm3_mode = 1 self.no_autoreq = 0 self.force_arch = None self.quiet = 0 def finalize_options(self): self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) if self.rpm_base is None: if not self.rpm3_mode: raise DistutilsOptionError( "you must specify --rpm-base in RPM 2 mode") self.rpm_base = os.path.join(self.bdist_base, "rpm") if self.python is None: if self.fix_python: self.python = sys.executable else: self.python = "python3" elif 
self.fix_python: raise DistutilsOptionError( "--python and --fix-python are mutually exclusive options") if os.name != 'posix': raise DistutilsPlatformError("don't know how to create RPM " "distributions on platform %s" % os.name) if self.binary_only and self.source_only: raise DistutilsOptionError( "cannot supply both '--source-only' and '--binary-only'") # don't pass CFLAGS to pure python distributions if not self.distribution.has_ext_modules(): self.use_rpm_opt_flags = 0 self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) self.finalize_package_data() def finalize_package_data(self): self.ensure_string('group', "Development/Libraries") self.ensure_string('vendor', "%s <%s>" % (self.distribution.get_contact(), self.distribution.get_contact_email())) self.ensure_string('packager') self.ensure_string_list('doc_files') if isinstance(self.doc_files, list): for readme in ('README', 'README.txt'): if os.path.exists(readme) and readme not in self.doc_files: self.doc_files.append(readme) self.ensure_string('release', "1") self.ensure_string('serial') # should it be an int? self.ensure_string('distribution_name') self.ensure_string('changelog') # Format changelog correctly self.changelog = self._format_changelog(self.changelog) self.ensure_filename('icon') self.ensure_filename('prep_script') self.ensure_filename('build_script') self.ensure_filename('install_script') self.ensure_filename('clean_script') self.ensure_filename('verify_script') self.ensure_filename('pre_install') self.ensure_filename('post_install') self.ensure_filename('pre_uninstall') self.ensure_filename('post_uninstall') # XXX don't forget we punted on summaries and descriptions -- they # should be handled here eventually! # Now *this* is some meta-data that belongs in the setup script... self.ensure_string_list('provides') self.ensure_string_list('requires') self.ensure_string_list('conflicts') self.ensure_string_list('build_requires') self.ensure_string_list('obsoletes') self.ensure_string('force_arch') def run(self): if DEBUG: print("before _get_package_data():") print("vendor =", self.vendor) print("packager =", self.packager) print("doc_files =", self.doc_files) print("changelog =", self.changelog) # make directories if self.spec_only: spec_dir = self.dist_dir self.mkpath(spec_dir) else: rpm_dir = {} for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'): rpm_dir[d] = os.path.join(self.rpm_base, d) self.mkpath(rpm_dir[d]) spec_dir = rpm_dir['SPECS'] # Spec file goes into 'dist_dir' if '--spec-only specified', # build/rpm.<plat> otherwise. spec_path = os.path.join(spec_dir, "%s.spec" % self.distribution.get_name()) self.execute(write_file, (spec_path, self._make_spec_file()), "writing '%s'" % spec_path) if self.spec_only: # stop if requested return # Make a source distribution and copy to SOURCES directory with # optional icon. saved_dist_files = self.distribution.dist_files[:] sdist = self.reinitialize_command('sdist') if self.use_bzip2: sdist.formats = ['bztar'] else: sdist.formats = ['gztar'] self.run_command('sdist') self.distribution.dist_files = saved_dist_files source = sdist.get_archive_files()[0] source_dir = rpm_dir['SOURCES'] self.copy_file(source, source_dir) if self.icon: if os.path.exists(self.icon): self.copy_file(self.icon, source_dir) else: raise DistutilsFileError( "icon file '%s' does not exist" % self.icon) # build package log.info("building RPMs") rpm_cmd = ['rpmbuild'] if self.source_only: # what kind of RPMs? 
rpm_cmd.append('-bs') elif self.binary_only: rpm_cmd.append('-bb') else: rpm_cmd.append('-ba') rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) if not self.keep_temp: rpm_cmd.append('--clean') if self.quiet: rpm_cmd.append('--quiet') rpm_cmd.append(spec_path) # Determine the binary rpm names that should be built out of this spec # file # Note that some of these may not be really built (if the file # list is empty) nvr_string = "%{name}-%{version}-%{release}" src_rpm = nvr_string + ".src.rpm" non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm" q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % ( src_rpm, non_src_rpm, spec_path) out = os.popen(q_cmd) try: binary_rpms = [] source_rpm = None while True: line = out.readline() if not line: break l = line.strip().split() assert(len(l) == 2) binary_rpms.append(l[1]) # The source rpm is named after the first entry in the spec file if source_rpm is None: source_rpm = l[0] status = out.close() if status: raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd)) finally: out.close() self.spawn(rpm_cmd) if not self.dry_run: if self.distribution.has_ext_modules(): pyversion = get_python_version() else: pyversion = 'any' if not self.binary_only: srpm = os.path.join(rpm_dir['SRPMS'], source_rpm) assert(os.path.exists(srpm)) self.move_file(srpm, self.dist_dir) filename = os.path.join(self.dist_dir, source_rpm) self.distribution.dist_files.append( ('bdist_rpm', pyversion, filename)) if not self.source_only: for rpm in binary_rpms: rpm = os.path.join(rpm_dir['RPMS'], rpm) if os.path.exists(rpm): self.move_file(rpm, self.dist_dir) filename = os.path.join(self.dist_dir, os.path.basename(rpm)) self.distribution.dist_files.append( ('bdist_rpm', pyversion, filename)) def _dist_path(self, path): return os.path.join(self.dist_dir, os.path.basename(path)) def _make_spec_file(self): """Generate the text of an RPM spec file and return it as a list of strings (one per line). """ # definitions and headers spec_file = [ '%define name ' + self.distribution.get_name(), '%define version ' + self.distribution.get_version().replace('-','_'), '%define unmangled_version ' + self.distribution.get_version(), '%define release ' + self.release.replace('-','_'), '', 'Summary: ' + self.distribution.get_description(), ] # Workaround for #14443 which affects some RPM based systems such as # RHEL6 (and probably derivatives) vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') # Generate a potential replacement value for __os_install_post (whilst # normalizing the whitespace to simplify the test for whether the # invocation of brp-python-bytecompile passes in __python): vendor_hook = '\n'.join([' %s \\' % line.strip() for line in vendor_hook.splitlines()]) problem = "brp-python-bytecompile \\\n" fixed = "brp-python-bytecompile %{__python} \\\n" fixed_hook = vendor_hook.replace(problem, fixed) if fixed_hook != vendor_hook: spec_file.append('# Workaround for http://bugs.python.org/issue14443') spec_file.append('%define __os_install_post ' + fixed_hook + '\n') # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) #for locale in self.summaries.keys(): # spec_file.append('Summary(%s): %s' % (locale, # self.summaries[locale])) spec_file.extend([ 'Name: %{name}', 'Version: %{version}', 'Release: %{release}',]) # XXX yuck! 
this filename is available from the "sdist" command, # but only after it has run: and we create the spec file before # running "sdist", in case of --spec-only. if self.use_bzip2: spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2') else: spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz') spec_file.extend([ 'License: ' + self.distribution.get_license(), 'Group: ' + self.group, 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', 'Prefix: %{_prefix}', ]) if not self.force_arch: # noarch if no extension modules if not self.distribution.has_ext_modules(): spec_file.append('BuildArch: noarch') else: spec_file.append( 'BuildArch: %s' % self.force_arch ) for field in ('Vendor', 'Packager', 'Provides', 'Requires', 'Conflicts', 'Obsoletes', ): val = getattr(self, field.lower()) if isinstance(val, list): spec_file.append('%s: %s' % (field, ' '.join(val))) elif val is not None: spec_file.append('%s: %s' % (field, val)) if self.distribution.get_url() != 'UNKNOWN': spec_file.append('Url: ' + self.distribution.get_url()) if self.distribution_name: spec_file.append('Distribution: ' + self.distribution_name) if self.build_requires: spec_file.append('BuildRequires: ' + ' '.join(self.build_requires)) if self.icon: spec_file.append('Icon: ' + os.path.basename(self.icon)) if self.no_autoreq: spec_file.append('AutoReq: 0') spec_file.extend([ '', '%description', self.distribution.get_long_description() ]) # put locale descriptions into spec file # XXX again, suppressed because config file syntax doesn't # easily support this ;-( #for locale in self.descriptions.keys(): # spec_file.extend([ # '', # '%description -l ' + locale, # self.descriptions[locale], # ]) # rpm scripts # figure out default build script def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0])) def_build = "%s build" % def_setup_call if self.use_rpm_opt_flags: def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build # insert contents of files # XXX this is kind of misleading: user-supplied options are files # that we open and interpolate into the spec file, but the defaults # are just text that we drop in as-is. Hmmm. 
install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT ' '--record=INSTALLED_FILES') % def_setup_call script_options = [ ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"), ('build', 'build_script', def_build), ('install', 'install_script', install_cmd), ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"), ('verifyscript', 'verify_script', None), ('pre', 'pre_install', None), ('post', 'post_install', None), ('preun', 'pre_uninstall', None), ('postun', 'post_uninstall', None), ] for (rpm_opt, attr, default) in script_options: # Insert contents of file referred to, if no file is referred to # use 'default' as contents of script val = getattr(self, attr) if val or default: spec_file.extend([ '', '%' + rpm_opt,]) if val: with open(val) as f: spec_file.extend(f.read().split('\n')) else: spec_file.append(default) # files section spec_file.extend([ '', '%files -f INSTALLED_FILES', '%defattr(-,root,root)', ]) if self.doc_files: spec_file.append('%doc ' + ' '.join(self.doc_files)) if self.changelog: spec_file.extend([ '', '%changelog',]) spec_file.extend(self.changelog) return spec_file def _format_changelog(self, changelog): """Format the changelog correctly and convert it to a list of strings """ if not changelog: return changelog new_changelog = [] for line in changelog.strip().split('\n'): line = line.strip() if line[0] == '*': new_changelog.extend(['', line]) elif line[0] == '-': new_changelog.append(line) else: new_changelog.append(' ' + line) # strip trailing newline inserted by first changelog entry if not new_changelog[0]: del new_changelog[0] return new_changelog
0
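A standalone sketch of the changelog normalization implemented by `_format_changelog` above: entry headers (`*`) get a blank separator line, bullets (`-`) pass through, and continuation lines are indented. The logic mirrors the method; the sample changelog is invented.

def format_changelog(changelog):
    """Re-packaged sketch of bdist_rpm._format_changelog."""
    if not changelog:
        return changelog
    new_changelog = []
    for line in changelog.strip().split('\n'):
        line = line.strip()
        if line[0] == '*':
            new_changelog.extend(['', line])   # blank line before each entry
        elif line[0] == '-':
            new_changelog.append(line)         # bullet lines kept as-is
        else:
            new_changelog.append('  ' + line)  # continuation lines indented
    if not new_changelog[0]:
        del new_changelog[0]                   # strip leading blank line
    return new_changelog

sample = "* Mon Jan 01 2021 Jane Doe <jane@example.net>\n- first release"
print(format_changelog(sample))
# ['* Mon Jan 01 2021 Jane Doe <jane@example.net>', '- first release']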
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/build_clib.py
"""distutils.command.build_clib Implements the Distutils 'build_clib' command, to build a C/C++ library that is included in the module distribution and needed by an extension module.""" # XXX this module has *lots* of code ripped-off quite transparently from # build_ext.py -- not surprisingly really, as the work required to build # a static library from a collection of C source files is not really all # that different from what's required to build a shared object file from # a collection of C source files. Nevertheless, I haven't done the # necessary refactoring to account for the overlap in code between the # two modules, mainly because a number of subtle details changed in the # cut 'n paste. Sigh. import os from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler from distutils import log def show_compilers(): from distutils.ccompiler import show_compilers show_compilers() class build_clib(Command): description = "build C/C++ libraries used by Python extensions" user_options = [ ('build-clib=', 'b', "directory to build C/C++ libraries to"), ('build-temp=', 't', "directory to put temporary build by-products"), ('debug', 'g', "compile with debugging information"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('compiler=', 'c', "specify the compiler type"), ] boolean_options = ['debug', 'force'] help_options = [ ('help-compiler', None, "list available compilers", show_compilers), ] def initialize_options(self): self.build_clib = None self.build_temp = None # List of libraries to build self.libraries = None # Compilation options for all libraries self.include_dirs = None self.define = None self.undef = None self.debug = None self.force = 0 self.compiler = None def finalize_options(self): # This might be confusing: both build-clib and build-temp default # to build-temp as defined by the "build" command. This is because # I think that C libraries are really just temporary build # by-products, at least from the point of view of building Python # extensions -- but I want to keep my options open. self.set_undefined_options('build', ('build_temp', 'build_clib'), ('build_temp', 'build_temp'), ('compiler', 'compiler'), ('debug', 'debug'), ('force', 'force')) self.libraries = self.distribution.libraries if self.libraries: self.check_library_list(self.libraries) if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) # XXX same as for build_ext -- what about 'self.define' and # 'self.undef' ? def run(self): if not self.libraries: return # Yech -- this is cut 'n pasted from build_ext.py! from distutils.ccompiler import new_compiler self.compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=self.force) customize_compiler(self.compiler) if self.include_dirs is not None: self.compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name,value) in self.define: self.compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro(macro) self.build_libraries(self.libraries) def check_library_list(self, libraries): """Ensure that the list of libraries is valid. `library` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). 
Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. """ if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)") def get_library_names(self): # Assume the library list is valid -- 'check_library_list()' is # called from 'finalize_options()', so it should be! if not self.libraries: return None lib_names = [] for (lib_name, build_info) in self.libraries: lib_names.append(lib_name) return lib_names def get_source_files(self): self.check_library_list(self.libraries) filenames = [] for (lib_name, build_info) in self.libraries: sources = build_info.get('sources') if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( "in 'libraries' option (library '%s'), " "'sources' must be present and must be " "a list of source filenames" % lib_name) filenames.extend(sources) return filenames def build_libraries(self, libraries): for (lib_name, build_info) in libraries: sources = build_info.get('sources') if sources is None or not isinstance(sources, (list, tuple)): raise DistutilsSetupError( "in 'libraries' option (library '%s'), " "'sources' must be present and must be " "a list of source filenames" % lib_name) sources = list(sources) log.info("building '%s' library", lib_name) # First, compile the source code to object files in the library # directory. (This should probably change to putting object # files in a temporary build directory.) macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') objects = self.compiler.compile(sources, output_dir=self.build_temp, macros=macros, include_dirs=include_dirs, debug=self.debug) # Now "link" the object files together into a static library. # (On Unix at least, this isn't really linking -- it just # builds an archive. Whatever.) self.compiler.create_static_lib(objects, lib_name, output_dir=self.build_clib, debug=self.debug)
0
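A sketch of the `libraries` structure that `check_library_list` above accepts: a list of `(name, build_info)` 2-tuples whose `build_info` dict must carry a `'sources'` list (per `build_libraries`). The library name, source paths, macro, and include directory below are hypothetical.

# Hypothetical setup-script value for the 'libraries' option validated above.
libraries = [
    ('mylib', {                                  # library name: plain string, no path separators
        'sources': ['src/alpha.c', 'src/beta.c'],  # required: list of C/C++ sources
        'macros': [('NDEBUG', '1')],             # optional (name, value) macro tuples
        'include_dirs': ['include'],             # optional extra include directories
    }),
]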
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install.py
"""distutils.command.install Implements the Distutils 'install' command.""" import sys import os from distutils import log from distutils.core import Command from distutils.debug import DEBUG from distutils.sysconfig import get_config_vars from distutils.errors import DistutilsPlatformError from distutils.file_util import write_file from distutils.util import convert_path, subst_vars, change_root from distutils.util import get_platform from distutils.errors import DistutilsOptionError from site import USER_BASE from site import USER_SITE HAS_USER_SITE = True WINDOWS_SCHEME = { 'purelib': '$base/Lib/site-packages', 'platlib': '$base/Lib/site-packages', 'headers': '$base/Include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', } INSTALL_SCHEMES = { 'unix_prefix': { 'purelib': '$base/lib/python$py_version_short/site-packages', 'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages', 'headers': '$base/include/python$py_version_short$abiflags/$dist_name', 'scripts': '$base/bin', 'data' : '$base', }, 'unix_home': { 'purelib': '$base/lib/python', 'platlib': '$base/$platlibdir/python', 'headers': '$base/include/python/$dist_name', 'scripts': '$base/bin', 'data' : '$base', }, 'nt': WINDOWS_SCHEME, 'pypy': { 'purelib': '$base/site-packages', 'platlib': '$base/site-packages', 'headers': '$base/include/$dist_name', 'scripts': '$base/bin', 'data' : '$base', }, 'pypy_nt': { 'purelib': '$base/site-packages', 'platlib': '$base/site-packages', 'headers': '$base/include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', }, } # user site schemes if HAS_USER_SITE: INSTALL_SCHEMES['nt_user'] = { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name', 'scripts': '$userbase/Python$py_version_nodot/Scripts', 'data' : '$userbase', } INSTALL_SCHEMES['unix_user'] = { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/include/python$py_version_short$abiflags/$dist_name', 'scripts': '$userbase/bin', 'data' : '$userbase', } # The keys to an installation scheme; if any new types of files are to be # installed, be sure to add an entry to every installation scheme above, # and to SCHEME_KEYS here. 
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data') class install(Command): description = "install everything from build directory" user_options = [ # Select installation scheme and set base director(y|ies) ('prefix=', None, "installation prefix"), ('exec-prefix=', None, "(Unix only) prefix for platform-specific files"), ('home=', None, "(Unix only) home directory to install under"), # Or, just set the base director(y|ies) ('install-base=', None, "base installation directory (instead of --prefix or --home)"), ('install-platbase=', None, "base installation directory for platform-specific files " + "(instead of --exec-prefix or --home)"), ('root=', None, "install everything relative to this alternate root directory"), # Or, explicitly set the installation scheme ('install-purelib=', None, "installation directory for pure Python module distributions"), ('install-platlib=', None, "installation directory for non-pure module distributions"), ('install-lib=', None, "installation directory for all module distributions " + "(overrides --install-purelib and --install-platlib)"), ('install-headers=', None, "installation directory for C/C++ headers"), ('install-scripts=', None, "installation directory for Python scripts"), ('install-data=', None, "installation directory for data files"), # Byte-compilation options -- see install_lib.py for details, as # these are duplicated from there (but only install_lib does # anything with them). ('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), # Miscellaneous control options ('force', 'f', "force installation (overwrite any existing files)"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), # Where to install documentation (eventually!) #('doc-format=', None, "format of documentation to generate"), #('install-man=', None, "directory for Unix man pages"), #('install-html=', None, "directory for HTML documentation"), #('install-info=', None, "directory for GNU info files"), ('record=', None, "filename in which to record list of installed files"), ] boolean_options = ['compile', 'force', 'skip-build'] if HAS_USER_SITE: user_options.append(('user', None, "install in user site-package '%s'" % USER_SITE)) boolean_options.append('user') negative_opt = {'no-compile' : 'compile'} def initialize_options(self): """Initializes options.""" # High-level options: these select both an installation base # and scheme. self.prefix = None self.exec_prefix = None self.home = None self.user = 0 # These select only the installation base; it's up to the user to # specify the installation scheme (currently, that means supplying # the --install-{platlib,purelib,scripts,data} options). self.install_base = None self.install_platbase = None self.root = None # These options are the actual installation directories; if not # supplied by the user, they are filled in using the installation # scheme implied by prefix/exec-prefix/home and the contents of # that installation scheme. 
self.install_purelib = None # for pure module distributions self.install_platlib = None # non-pure (dists w/ extensions) self.install_headers = None # for C/C++ headers self.install_lib = None # set to either purelib or platlib self.install_scripts = None self.install_data = None self.install_userbase = USER_BASE self.install_usersite = USER_SITE self.compile = None self.optimize = None # Deprecated # These two are for putting non-packagized distributions into their # own directory and creating a .pth file if it makes sense. # 'extra_path' comes from the setup file; 'install_path_file' can # be turned off if it makes no sense to install a .pth file. (But # better to install it uselessly than to guess wrong and not # install it when it's necessary and would be used!) Currently, # 'install_path_file' is always true unless some outsider meddles # with it. self.extra_path = None self.install_path_file = 1 # 'force' forces installation, even if target files are not # out-of-date. 'skip_build' skips running the "build" command, # handy if you know it's not necessary. 'warn_dir' (which is *not* # a user option, it's just there so the bdist_* commands can turn # it off) determines whether we warn about installing to a # directory not in sys.path. self.force = 0 self.skip_build = 0 self.warn_dir = 1 # These are only here as a conduit from the 'build' command to the # 'install_*' commands that do the real work. ('build_base' isn't # actually used anywhere, but it might be useful in future.) They # are not user options, because if the user told the install # command where the build directory is, that wouldn't affect the # build command. self.build_base = None self.build_lib = None # Not defined yet because we don't know anything about # documentation yet. #self.install_man = None #self.install_html = None #self.install_info = None self.record = None # -- Option finalizing methods ------------------------------------- # (This is rather more involved than for most commands, # because this is where the policy for installing third- # party Python modules on various platforms given a wide # array of user input is decided. Yes, it's quite complex!) def finalize_options(self): """Finalizes options.""" # This method (and its helpers, like 'finalize_unix()', # 'finalize_other()', and 'select_scheme()') is where the default # installation directories for modules, extension modules, and # anything else we care to install from a Python module # distribution. Thus, this code makes a pretty important policy # statement about how third-party stuff is added to a Python # installation! Note that the actual work of installation is done # by the relatively simple 'install_*' commands; they just take # their orders from the installation directory options determined # here. # Check for errors/inconsistencies in the options; first, stuff # that's wrong on any platform. if ((self.prefix or self.exec_prefix or self.home) and (self.install_base or self.install_platbase)): raise DistutilsOptionError( "must supply either prefix/exec-prefix/home or " + "install-base/install-platbase -- not both") if self.home and (self.prefix or self.exec_prefix): raise DistutilsOptionError( "must supply either home or prefix/exec-prefix -- not both") if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): raise DistutilsOptionError("can't combine user with prefix, " "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. 
if os.name != "posix": if self.exec_prefix: self.warn("exec-prefix option ignored on this platform") self.exec_prefix = None # Now the interesting logic -- so interesting that we farm it out # to other methods. The goal of these methods is to set the final # values for the install_{lib,scripts,data,...} options, using as # input a heady brew of prefix, exec_prefix, home, install_base, # install_platbase, user-supplied versions of # install_{purelib,platlib,lib,scripts,data,...}, and the # INSTALL_SCHEME dictionary above. Phew! self.dump_dirs("pre-finalize_{unix,other}") if os.name == 'posix': self.finalize_unix() else: self.finalize_other() self.dump_dirs("post-finalize_{unix,other}()") # Expand configuration variables, tilde, etc. in self.install_base # and self.install_platbase -- that way, we can use $base or # $platbase in the other installation directories and not worry # about needing recursive variable expansion (shudder). py_version = sys.version.split()[0] (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix') try: abiflags = sys.abiflags except AttributeError: # sys.abiflags may not be defined on all platforms. abiflags = '' self.config_vars = {'dist_name': self.distribution.get_name(), 'dist_version': self.distribution.get_version(), 'dist_fullname': self.distribution.get_fullname(), 'py_version': py_version, 'py_version_short': '%d.%d' % sys.version_info[:2], 'py_version_nodot': '%d%d' % sys.version_info[:2], 'sys_prefix': prefix, 'prefix': prefix, 'sys_exec_prefix': exec_prefix, 'exec_prefix': exec_prefix, 'abiflags': abiflags, 'platlibdir': getattr(sys, 'platlibdir', 'lib'), } if HAS_USER_SITE: self.config_vars['userbase'] = self.install_userbase self.config_vars['usersite'] = self.install_usersite self.expand_basedirs() self.dump_dirs("post-expand_basedirs()") # Now define config vars for the base directories so we can expand # everything else. self.config_vars['base'] = self.install_base self.config_vars['platbase'] = self.install_platbase if DEBUG: from pprint import pprint print("config vars:") pprint(self.config_vars) # Expand "~" and configuration variables in the installation # directories. self.expand_dirs() self.dump_dirs("post-expand_dirs()") # Create directories in the home dir: if self.user: self.create_home_path() # Pick the actual directory to install all modules to: either # install_purelib or install_platlib, depending on whether this # module distribution is pure or not. Of course, if the user # already specified install_lib, use their selection. if self.install_lib is None: if self.distribution.ext_modules: # has extensions: non-pure self.install_lib = self.install_platlib else: self.install_lib = self.install_purelib # Convert directories from Unix /-separated syntax to the local # convention. self.convert_paths('lib', 'purelib', 'platlib', 'scripts', 'data', 'headers', 'userbase', 'usersite') # Deprecated # Well, we're not actually fully completely finalized yet: we still # have to deal with 'extra_path', which is the hack for allowing # non-packagized module distributions (hello, Numerical Python!) to # get their own directories. self.handle_extra_path() self.install_libbase = self.install_lib # needed for .pth file self.install_lib = os.path.join(self.install_lib, self.extra_dirs) # If a new root directory was supplied, make all the installation # dirs relative to it. 
if self.root is not None: self.change_roots('libbase', 'lib', 'purelib', 'platlib', 'scripts', 'data', 'headers') self.dump_dirs("after prepending root") # Find out the build directories, ie. where to install from. self.set_undefined_options('build', ('build_base', 'build_base'), ('build_lib', 'build_lib')) # Punt on doc directories for now -- after all, we're punting on # documentation completely! def dump_dirs(self, msg): """Dumps the list of user options.""" if not DEBUG: return from distutils.fancy_getopt import longopt_xlate log.debug(msg + ":") for opt in self.user_options: opt_name = opt[0] if opt_name[-1] == "=": opt_name = opt_name[0:-1] if opt_name in self.negative_opt: opt_name = self.negative_opt[opt_name] opt_name = opt_name.translate(longopt_xlate) val = not getattr(self, opt_name) else: opt_name = opt_name.translate(longopt_xlate) val = getattr(self, opt_name) log.debug(" %s: %s", opt_name, val) def finalize_unix(self): """Finalizes options for posix platforms.""" if self.install_base is not None or self.install_platbase is not None: if ((self.install_lib is None and self.install_purelib is None and self.install_platlib is None) or self.install_headers is None or self.install_scripts is None or self.install_data is None): raise DistutilsOptionError( "install-base or install-platbase supplied, but " "installation scheme is incomplete") return if self.user: if self.install_userbase is None: raise DistutilsPlatformError( "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme("unix_user") elif self.home is not None: self.install_base = self.install_platbase = self.home self.select_scheme("unix_home") else: if self.prefix is None: if self.exec_prefix is not None: raise DistutilsOptionError( "must not supply exec-prefix without prefix") self.prefix = os.path.normpath(sys.prefix) self.exec_prefix = os.path.normpath(sys.exec_prefix) else: if self.exec_prefix is None: self.exec_prefix = self.prefix self.install_base = self.prefix self.install_platbase = self.exec_prefix self.select_scheme("unix_prefix") def finalize_other(self): """Finalizes options for non-posix platforms""" if self.user: if self.install_userbase is None: raise DistutilsPlatformError( "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme(os.name + "_user") elif self.home is not None: self.install_base = self.install_platbase = self.home self.select_scheme("unix_home") else: if self.prefix is None: self.prefix = os.path.normpath(sys.prefix) self.install_base = self.install_platbase = self.prefix try: self.select_scheme(os.name) except KeyError: raise DistutilsPlatformError( "I don't know how to install stuff on '%s'" % os.name) def select_scheme(self, name): """Sets the install directories by applying the install schemes.""" # it's the caller's problem if they supply a bad name! 
if (hasattr(sys, 'pypy_version_info') and not name.endswith(('_user', '_home'))): if os.name == 'nt': name = 'pypy_nt' else: name = 'pypy' scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: attrname = 'install_' + key if getattr(self, attrname) is None: setattr(self, attrname, scheme[key]) def _expand_attrs(self, attrs): for attr in attrs: val = getattr(self, attr) if val is not None: if os.name == 'posix' or os.name == 'nt': val = os.path.expanduser(val) val = subst_vars(val, self.config_vars) setattr(self, attr, val) def expand_basedirs(self): """Calls `os.path.expanduser` on install_base, install_platbase and root.""" self._expand_attrs(['install_base', 'install_platbase', 'root']) def expand_dirs(self): """Calls `os.path.expanduser` on install dirs.""" self._expand_attrs(['install_purelib', 'install_platlib', 'install_lib', 'install_headers', 'install_scripts', 'install_data',]) def convert_paths(self, *names): """Call `convert_path` over `names`.""" for name in names: attr = "install_" + name setattr(self, attr, convert_path(getattr(self, attr))) def handle_extra_path(self): """Set `path_file` and `extra_dirs` using `extra_path`.""" if self.extra_path is None: self.extra_path = self.distribution.extra_path if self.extra_path is not None: log.warn( "Distribution option extra_path is deprecated. " "See issue27919 for details." ) if isinstance(self.extra_path, str): self.extra_path = self.extra_path.split(',') if len(self.extra_path) == 1: path_file = extra_dirs = self.extra_path[0] elif len(self.extra_path) == 2: path_file, extra_dirs = self.extra_path else: raise DistutilsOptionError( "'extra_path' option must be a list, tuple, or " "comma-separated string with 1 or 2 elements") # convert to local form in case Unix notation used (as it # should be in setup scripts) extra_dirs = convert_path(extra_dirs) else: path_file = None extra_dirs = '' # XXX should we warn if path_file and not extra_dirs? (in which # case the path file would be harmless but pointless) self.path_file = path_file self.extra_dirs = extra_dirs def change_roots(self, *names): """Change the install directories pointed by name using root.""" for name in names: attr = "install_" + name setattr(self, attr, change_root(self.root, getattr(self, attr))) def create_home_path(self): """Create directories under ~.""" if not self.user: return home = convert_path(os.path.expanduser("~")) for name, path in self.config_vars.items(): if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0o700)" % path) os.makedirs(path, 0o700) # -- Command execution methods ------------------------------------- def run(self): """Runs the command.""" # Obviously have to build before we can install if not self.skip_build: self.run_command('build') # If we built for any other platform, we can't install. build_plat = self.distribution.get_command_obj('build').plat_name # check warn_dir - it is a clue that the 'install' is happening # internally, and not to sys.path, so we don't check the platform # matches what we are running. if self.warn_dir and build_plat != get_platform(): raise DistutilsPlatformError("Can't install when " "cross-compiling") # Run all sub-commands (at least those that need to be run) for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) if self.path_file: self.create_path_file() # write list of installed files, if requested. 
if self.record: outputs = self.get_outputs() if self.root: # strip any package prefix root_len = len(self.root) for counter in range(len(outputs)): outputs[counter] = outputs[counter][root_len:] self.execute(write_file, (self.record, outputs), "writing list of installed files to '%s'" % self.record) sys_path = map(os.path.normpath, sys.path) sys_path = map(os.path.normcase, sys_path) install_lib = os.path.normcase(os.path.normpath(self.install_lib)) if (self.warn_dir and not (self.path_file and self.install_path_file) and install_lib not in sys_path): log.debug(("modules installed to '%s', which is not in " "Python's module search path (sys.path) -- " "you'll have to change the search path yourself"), self.install_lib) def create_path_file(self): """Creates the .pth file""" filename = os.path.join(self.install_libbase, self.path_file + ".pth") if self.install_path_file: self.execute(write_file, (filename, [self.extra_dirs]), "creating %s" % filename) else: self.warn("path file '%s' not created" % filename) # -- Reporting methods --------------------------------------------- def get_outputs(self): """Assembles the outputs of all the sub-commands.""" outputs = [] for cmd_name in self.get_sub_commands(): cmd = self.get_finalized_command(cmd_name) # Add the contents of cmd.get_outputs(), ensuring # that outputs doesn't contain duplicate entries for filename in cmd.get_outputs(): if filename not in outputs: outputs.append(filename) if self.path_file and self.install_path_file: outputs.append(os.path.join(self.install_libbase, self.path_file + ".pth")) return outputs def get_inputs(self): """Returns the inputs of all the sub-commands""" # XXX gee, this looks familiar ;-( inputs = [] for cmd_name in self.get_sub_commands(): cmd = self.get_finalized_command(cmd_name) inputs.extend(cmd.get_inputs()) return inputs # -- Predicates for sub-command list ------------------------------- def has_lib(self): """Returns true if the current distribution has any Python modules to install.""" return (self.distribution.has_pure_modules() or self.distribution.has_ext_modules()) def has_headers(self): """Returns true if the current distribution has any headers to install.""" return self.distribution.has_headers() def has_scripts(self): """Returns true if the current distribution has any scripts to. install.""" return self.distribution.has_scripts() def has_data(self): """Returns true if the current distribution has any data to. install.""" return self.distribution.has_data_files() # 'sub_commands': a list of commands this command might have to run to # get its work done. See cmd.py for more info. sub_commands = [('install_lib', has_lib), ('install_headers', has_headers), ('install_scripts', has_scripts), ('install_data', has_data), ('install_egg_info', lambda self:True), ]
0
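A minimal sketch of the scheme expansion that `install.finalize_options` drives: `subst_vars` (from `distutils.util`, as imported in the file above) fills `$`-variables in a scheme template. The template is `INSTALL_SCHEMES['unix_prefix']['purelib']` from the file; the config values below are hypothetical.

from distutils.util import subst_vars

# Hypothetical values standing in for install.config_vars.
config_vars = {'base': '/usr/local', 'py_version_short': '3.9'}

# Template copied from INSTALL_SCHEMES['unix_prefix']['purelib'] above.
template = '$base/lib/python$py_version_short/site-packages'

print(subst_vars(template, config_vars))
# /usr/local/lib/python3.9/site-packages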
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/bdist_msi.py
# Copyright (C) 2005, 2006 Martin von Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""

import os
import sys
import warnings
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data

class PyDialog(Dialog):
    """Dialog class with a fixed layout: controls at the top, then a ruler,
    then a list of buttons: back, next, cancel. Optionally a bitmap at the
    left."""
    def __init__(self, *args, **kw):
        """Dialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler = self.h - 36
        bmwidth = 152*ruler/328
        #if kw.get("bitmap", True):
        #    self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
        self.line("BottomLine", 0, ruler, self.w, 0)

    def title(self, title):
        "Set the title text of the dialog at the top."
        # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
        # text, in VerdanaBold10
        self.text("Title", 15, 10, 320, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)

    def back(self, title, next, name = "Back", active = 1):
        """Add a back button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        if active:
            flags = 3 # Visible|Enabled
        else:
            flags = 1 # Visible
        return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)

    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add a cancel button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        if active:
            flags = 3 # Visible|Enabled
        else:
            flags = 1 # Visible
        return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)

    def next(self, title, next, name = "Next", active = 1):
        """Add a Next button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        if active:
            flags = 3 # Visible|Enabled
        else:
            flags = 1 # Visible
        return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)

    def xbutton(self, name, title, next, xpos):
        """Add a button with a given title, the tab-next button,
        its name in the Control table, giving its x position; the
        y-position is aligned with the other buttons.

        Return the button, so that events can be associated"""
        return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)

class bdist_msi(Command):

    description = "create a Microsoft Installer (.msi) binary distribution"

    user_options = [('bdist-dir=', None,
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('target-version=', None,
                     "require a specific python version" +
                     " on the target system"),
                    ('no-target-compile', 'c',
                     "do not compile .py to .pyc on the target system"),
                    ('no-target-optimize', 'o',
                     "do not compile .py to .pyo (optimized) "
                     "on the target system"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('install-script=', None,
                     "basename of installation script to be run after "
                     "installation or before deinstallation"),
                    ('pre-install-script=', None,
                     "Fully qualified filename of a script to be run before "
                     "any files are installed.  This script need not be in the "
                     "distribution"),
                   ]

    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
                       'skip-build']

    all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
                    '2.5', '2.6', '2.7', '2.8', '2.9',
                    '3.0', '3.1', '3.2', '3.3', '3.4',
                    '3.5', '3.6', '3.7', '3.8', '3.9']
    other_version = 'X'

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        warnings.warn("bdist_msi command is deprecated since Python 3.9, "
                      "use bdist_wheel (wheel packages) instead",
                      DeprecationWarning, 2)

    def initialize_options(self):
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.skip_build = None
        self.install_script = None
        self.pre_install_script = None
        self.versions = None

    def finalize_options(self):
        self.set_undefined_options('bdist', ('skip_build', 'skip_build'))

        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'msi')

        short_version = get_python_version()
        if (not self.target_version) and self.distribution.has_ext_modules():
            self.target_version = short_version

        if self.target_version:
            self.versions = [self.target_version]
            if not self.skip_build and self.distribution.has_ext_modules()\
               and self.target_version != short_version:
                raise DistutilsOptionError(
                      "target version can only be %s, or the '--skip-build'"
                      " option must be specified" % (short_version,))
        else:
            self.versions = list(self.all_versions)

        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                   )

        if self.pre_install_script:
            raise DistutilsOptionError(
                  "the pre-install-script feature is not yet implemented")

        if self.install_script:
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError(
                      "install_script '%s' not found in scripts"
                      % self.install_script)
        self.install_script_key = None

    def run(self):
        if not self.skip_build:
            self.run_command('build')

        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.prefix = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0

        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0

        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = '%d.%d' % sys.version_info[:2]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)

        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()

        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))

        install.run()

        del sys.path[0]

        self.mkpath(self.dist_dir)
        fullname = self.distribution.get_fullname()
        installer_name = self.get_installer_filename(fullname)
        installer_name = os.path.abspath(installer_name)
        if os.path.exists(installer_name): os.unlink(installer_name)

        metadata = self.distribution.metadata
        author = metadata.author
        if not author:
            author = metadata.maintainer
        if not author:
            author = "UNKNOWN"
        version = metadata.get_version()
        # ProductVersion must be strictly numeric
        # XXX need to deal with prerelease versions
        sversion = "%d.%d.%d" % StrictVersion(version).version
        # Prefix ProductName with Python x.y, so that
        # it sorts together with the other Python packages
        # in Add-Remove-Programs (ARP)
        fullname = self.distribution.get_fullname()
        if self.target_version:
            product_name = "Python %s %s" % (self.target_version, fullname)
        else:
            product_name = "Python %s" % (fullname)
        self.db = msilib.init_database(installer_name, schema,
                product_name, msilib.gen_uuid(),
                sversion, author)
        msilib.add_tables(self.db, sequence)
        props = [('DistVersion', version)]
        email = metadata.author_email or metadata.maintainer_email
        if email:
            props.append(("ARPCONTACT", email))
        if metadata.url:
            props.append(("ARPURLINFOABOUT", metadata.url))
        if props:
            add_data(self.db, 'Property', props)

        self.add_find_python()
        self.add_files()
        self.add_scripts()
        self.add_ui()
        self.db.Commit()

        if hasattr(self.distribution, 'dist_files'):
            tup = 'bdist_msi', self.target_version or 'any', fullname
            self.distribution.dist_files.append(tup)

        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)

    def add_files(self):
        db = self.db
        cab = msilib.CAB("distfiles")
        rootdir = os.path.abspath(self.bdist_dir)

        root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
        f = Feature(db, "Python", "Python", "Everything",
                    0, 1, directory="TARGETDIR")

        items = [(f, root, '')]
        for version in self.versions + [self.other_version]:
            target = "TARGETDIR" + version
            name = default = "Python" + version
            desc = "Everything"
            if version is self.other_version:
                title = "Python from another location"
                level = 2
            else:
                title = "Python %s from registry" % version
                level = 1
            f = Feature(db, name, title, desc, 1, level, directory=target)
            dir = Directory(db, cab, root, rootdir, target, default)
            items.append((f, dir, version))
        db.Commit()

        seen = {}
        for feature, dir, version in items:
            todo = [dir]
            while todo:
                dir = todo.pop()
                for file in os.listdir(dir.absolute):
                    afile = os.path.join(dir.absolute, file)
                    if os.path.isdir(afile):
                        short = "%s|%s" % (dir.make_short(file), file)
                        default = file + version
                        newdir = Directory(db, cab, dir, file, default, short)
                        todo.append(newdir)
                    else:
                        if not dir.component:
                            dir.start_component(dir.logical, feature, 0)
                        if afile not in seen:
                            key = seen[afile] = dir.add_file(file)
                            if file==self.install_script:
                                if self.install_script_key:
                                    raise DistutilsOptionError(
                                          "Multiple files with name %s" % file)
                                self.install_script_key = '[#%s]' % key
                        else:
                            key = seen[afile]
                            add_data(self.db, "DuplicateFile",
                                [(key + version, dir.component, key, None, dir.logical)])
        db.Commit()
        cab.commit(db)

    def add_find_python(self):
        """Adds code to the installer to compute the location of Python.

        Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
        registry for each version of Python.

        Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
        else from PYTHON.MACHINE.X.Y.

        Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""

        start = 402
        for ver in self.versions:
            install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
            machine_reg = "python.machine." + ver
            user_reg = "python.user." + ver
            machine_prop = "PYTHON.MACHINE." + ver
            user_prop = "PYTHON.USER." + ver
            machine_action = "PythonFromMachine" + ver
            user_action = "PythonFromUser" + ver
            exe_action = "PythonExe" + ver
            target_dir_prop = "TARGETDIR" + ver
            exe_prop = "PYTHON" + ver
            if msilib.Win64:
                # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
                Type = 2+16
            else:
                Type = 2
            add_data(self.db, "RegLocator",
                    [(machine_reg, 2, install_path, None, Type),
                     (user_reg, 1, install_path, None, Type)])
            add_data(self.db, "AppSearch",
                    [(machine_prop, machine_reg),
                     (user_prop, user_reg)])
            add_data(self.db, "CustomAction",
                    [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
                     (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
                     (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
                    ])
            add_data(self.db, "InstallExecuteSequence",
                    [(machine_action, machine_prop, start),
                     (user_action, user_prop, start + 1),
                     (exe_action, None, start + 2),
                    ])
            add_data(self.db, "InstallUISequence",
                    [(machine_action, machine_prop, start),
                     (user_action, user_prop, start + 1),
                     (exe_action, None, start + 2),
                    ])
            add_data(self.db, "Condition",
                    [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
            start += 4
        assert start < 500

    def add_scripts(self):
        if self.install_script:
            start = 6800
            for ver in self.versions + [self.other_version]:
                install_action = "install_script." + ver
                exe_prop = "PYTHON" + ver
                add_data(self.db, "CustomAction",
                        [(install_action, 50, exe_prop, self.install_script_key)])
                add_data(self.db, "InstallExecuteSequence",
                        [(install_action, "&Python%s=3" % ver, start)])
                start += 1
        # XXX pre-install scripts are currently refused in finalize_options()
        #     but if this feature is completed, it will also need to add
        #     entries for each version as the above code does
        if self.pre_install_script:
            scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
            with open(scriptfn, "w") as f:
                # The batch file will be executed with [PYTHON], so that %1
                # is the path to the Python interpreter; %0 will be the path
                # of the batch file.
                # rem ="""
                # %1 %0
                # exit
                # """
                # <actual script>
                f.write('rem ="""\n%1 %0\nexit\n"""\n')
                with open(self.pre_install_script) as fin:
                    f.write(fin.read())
            add_data(self.db, "Binary",
                     [("PreInstall", msilib.Binary(scriptfn))
                     ])
            add_data(self.db, "CustomAction",
                     [("PreInstall", 2, "PreInstall", None)
                     ])
            add_data(self.db, "InstallExecuteSequence",
                    [("PreInstall", "NOT Installed", 450)])

    def add_ui(self):
        db = self.db
        x = y = 50
        w = 370
        h = 300
        title = "[ProductName] Setup"

        # see "Dialog Style Bits"
        modal = 3      # visible | modal
        modeless = 1   # visible
        track_disk_space = 32

        # UI customization properties
        add_data(db, "Property",
                 # See "DefaultUIFont Property"
                 [("DefaultUIFont", "DlgFont8"),
                  # See "ErrorDialog Style Bit"
                  ("ErrorDialog", "ErrorDlg"),
                  ("Progress1", "Install"),   # modified in maintenance type dlg
                  ("Progress2", "installs"),
                  ("MaintenanceForm_Action", "Repair"),
                  # possible values: ALL, JUSTME
                  ("WhichUsers", "ALL")
                 ])

        # Fonts, see "TextStyle Table"
        add_data(db, "TextStyle",
                 [("DlgFont8", "Tahoma", 9, None, 0),
                  ("DlgFontBold8", "Tahoma", 8, None, 1), #bold
                  ("VerdanaBold10", "Verdana", 10, None, 1),
                  ("VerdanaRed9", "Verdana", 9, 255, 0),
                 ])

        # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
        # Numbers indicate sequence; see sequence.py for how these action integrate
        add_data(db, "InstallUISequence",
                 [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
                  ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
                  # In the user interface, assume all-users installation if privileged.
                  ("SelectFeaturesDlg", "Not Installed", 1230),
                  # XXX no support for resume installations yet
                  #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
                  ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
                  ("ProgressDlg", None, 1280)])

        add_data(db, 'ActionText', text.ActionText)
        add_data(db, 'UIText', text.UIText)
        #####################################################################
        # Standard dialogs: FatalError, UserExit, ExitDialog
        fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
                     "Finish", "Finish", "Finish")
        fatal.title("[ProductName] Installer ended prematurely")
        fatal.back("< Back", "Finish", active = 0)
        fatal.cancel("Cancel", "Back", active = 0)
        fatal.text("Description1", 15, 70, 320, 80, 0x30003,
                   "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
        fatal.text("Description2", 15, 155, 320, 20, 0x30003,
                   "Click the Finish button to exit the Installer.")
        c=fatal.next("Finish", "Cancel", name="Finish")
        c.event("EndDialog", "Exit")

        user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
                     "Finish", "Finish", "Finish")
        user_exit.title("[ProductName] Installer was interrupted")
        user_exit.back("< Back", "Finish", active = 0)
        user_exit.cancel("Cancel", "Back", active = 0)
        user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
                   "[ProductName] setup was interrupted. Your system has not been modified.
" "To install this program at a later time, please run the installation again.") user_exit.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = user_exit.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, "Finish", "Finish", "Finish") exit_dialog.title("Completing the [ProductName] Installer") exit_dialog.back("< Back", "Finish", active = 0) exit_dialog.cancel("Cancel", "Back", active = 0) exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = exit_dialog.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Return") ##################################################################### # Required dialog: FilesInUse, ErrorDlg inuse = PyDialog(db, "FilesInUse", x, y, w, h, 19, # KeepModeless|Modal|Visible title, "Retry", "Retry", "Retry", bitmap=False) inuse.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Files in Use") inuse.text("Description", 20, 23, 280, 20, 0x30003, "Some files that need to be updated are currently in use.") inuse.text("Text", 20, 55, 330, 50, 3, "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", None, None, None) c=inuse.back("Exit", "Ignore", name="Exit") c.event("EndDialog", "Exit") c=inuse.next("Ignore", "Retry", name="Ignore") c.event("EndDialog", "Ignore") c=inuse.cancel("Retry", "Exit", name="Retry") c.event("EndDialog","Retry") # See "Error Dialog". See "ICE20" for the required names of the controls. error = Dialog(db, "ErrorDlg", 50, 10, 330, 101, 65543, # Error|Minimize|Modal|Visible title, "ErrorText", None, None) error.text("ErrorText", 50,9,280,48,3, "") #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") ##################################################################### # Global "Query Cancel" dialog cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, "No", "No", "No") cancel.text("Text", 48, 15, 194, 30, 3, "Are you sure you want to cancel [ProductName] installation?") #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, # "py.ico", None, None) c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") c.event("EndDialog", "Exit") c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") c.event("EndDialog", "Return") ##################################################################### # Global "Wait for costing" dialog costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, "Return", "Return", "Return") costing.text("Text", 48, 15, 194, 30, 3, "Please wait while the installer finishes determining your disk space requirements.") c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) c.event("EndDialog", "Exit") 
##################################################################### # Preparation dialog: no user input except cancellation prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel") prep.text("Description", 15, 70, 320, 40, 0x30003, "Please wait while the Installer prepares to guide you through the installation.") prep.title("Welcome to the [ProductName] Installer") c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") c.mapping("ActionText", "Text") c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) c.mapping("ActionData", "Text") prep.back("Back", None, active=0) prep.next("Next", None, active=0) c=prep.cancel("Cancel", None) c.event("SpawnDialog", "CancelDlg") ##################################################################### # Feature (Python directory) selection seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") seldlg.title("Select Python Installations") seldlg.text("Hint", 15, 30, 300, 20, 3, "Select the Python locations where %s should be installed." % self.distribution.get_fullname()) seldlg.back("< Back", None, active=0) c = seldlg.next("Next >", "Cancel") order = 1 c.event("[TARGETDIR]", "[SourceDir]", ordering=order) for version in self.versions + [self.other_version]: order += 1 c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, "FEATURE_SELECTED AND &Python%s=3" % version, ordering=order) c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) c.event("EndDialog", "Return", ordering=order + 2) c = seldlg.cancel("Cancel", "Features") c.event("SpawnDialog", "CancelDlg") c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, "FEATURE", None, "PathEdit", None) c.event("[FEATURE_SELECTED]", "1") ver = self.other_version install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver c = seldlg.text("Other", 15, 200, 300, 15, 3, "Provide an alternate Python location") c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, "TARGETDIR" + ver, None, "Next", None) c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) ##################################################################### # Disk cost cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, "OK", "OK", "OK", bitmap=False) cost.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Disk Space Requirements") cost.text("Description", 20, 20, 280, 20, 0x30003, "The disk space required for the installation of the selected features.") cost.text("Text", 20, 53, 330, 60, 3, "The highlighted volumes (if any) do not have enough disk space " "available for the currently selected features. You can either " "remove some files from the highlighted volumes, or choose to " "install less features onto local drive(s), or select different " "destination drive(s).") cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, None, "{120}{70}{70}{70}{70}", None, None) cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") ##################################################################### # WhichUsers Dialog. Only available on NT, and for privileged users. 
# This must be run before FindRelatedProducts, because that will # take into account whether the previous installation was per-user # or per-machine. We currently don't support going back to this # dialog after "Next" was selected; to support this, we would need to # find how to reset the ALLUSERS property, and how to re-run # FindRelatedProducts. # On Windows9x, the ALLUSERS property is ignored on the command line # and in the Property table, but installer fails according to the documentation # if a dialog attempts to set ALLUSERS. whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, "AdminInstall", "Next", "Cancel") whichusers.title("Select whether to install [ProductName] for all users of this computer.") # A radio group with two options: allusers, justme g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, "WhichUsers", "", "Next") g.add("ALL", 0, 5, 150, 20, "Install for all users") g.add("JUSTME", 0, 25, 150, 20, "Install just for me") whichusers.back("Back", None, active=0) c = whichusers.next("Next >", "Cancel") c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) c.event("EndDialog", "Return", ordering = 2) c = whichusers.cancel("Cancel", "AdminInstall") c.event("SpawnDialog", "CancelDlg") ##################################################################### # Installation Progress dialog (modeless) progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel", bitmap=False) progress.text("Title", 20, 15, 200, 15, 0x30003, r"{\DlgFontBold8}[Progress1] [ProductName]") progress.text("Text", 35, 65, 300, 30, 3, "Please wait while the Installer [Progress2] [ProductName]. " "This may take several minutes.") progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") c.mapping("ActionText", "Text") #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) #c.mapping("ActionData", "Text") c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, None, "Progress done", None, None) c.mapping("SetProgress", "Progress") progress.back("< Back", "Next", active=False) progress.next("Next >", "Cancel", active=False) progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") ################################################################### # Maintenance type: repair/uninstall maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") maint.title("Welcome to the [ProductName] Setup Wizard") maint.text("BodyText", 15, 63, 330, 42, 3, "Select whether you want to repair or remove [ProductName].") g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, "MaintenanceForm_Action", "", "Next") #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") maint.back("< Back", None, active=False) c=maint.next("Finish", "Cancel") # Change installation: Change progress dialog to "Change", then ask # for feature selection #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) # Reinstall: Change progress dialog to "Repair", then invoke reinstall # Also set list of reinstalled features to "ALL" c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) c.event("Reinstall", 
"ALL", 'MaintenanceForm_Action="Repair"', 8) # Uninstall: Change progress to "Remove", then invoke uninstall # Also set list of removed features to "ALL" c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14) # Close dialog when maintenance action scheduled c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") def get_installer_filename(self, fullname): # Factored out to allow overriding in subclasses if self.target_version: base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, self.target_version) else: base_name = "%s.%s.msi" % (fullname, self.plat_name) installer_name = os.path.join(self.dist_dir, base_name) return installer_name
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install_egg_info.py
"""distutils.command.install_egg_info Implements the Distutils 'install_egg_info' command, for installing a package's PKG-INFO metadata.""" from distutils.cmd import Command from distutils import log, dir_util import os, sys, re class install_egg_info(Command): """Install an .egg-info file for the package""" description = "Install package's PKG-INFO metadata as an .egg-info file" user_options = [ ('install-dir=', 'd', "directory to install to"), ] def initialize_options(self): self.install_dir = None def finalize_options(self): self.set_undefined_options('install_lib',('install_dir','install_dir')) basename = "%s-%s-py%d.%d.egg-info" % ( to_filename(safe_name(self.distribution.get_name())), to_filename(safe_version(self.distribution.get_version())), *sys.version_info[:2] ) self.target = os.path.join(self.install_dir, basename) self.outputs = [self.target] def run(self): target = self.target if os.path.isdir(target) and not os.path.islink(target): dir_util.remove_tree(target, dry_run=self.dry_run) elif os.path.exists(target): self.execute(os.unlink,(self.target,),"Removing "+target) elif not os.path.isdir(self.install_dir): self.execute(os.makedirs, (self.install_dir,), "Creating "+self.install_dir) log.info("Writing %s", target) if not self.dry_run: with open(target, 'w', encoding='UTF-8') as f: self.distribution.metadata.write_pkg_file(f) def get_outputs(self): return self.outputs # The following routines are taken from setuptools' pkg_resources module and # can be replaced by importing them from pkg_resources once it is included # in the stdlib. def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """Convert an arbitrary string to a standard version string Spaces become dots, and all other non-alphanumeric characters become dashes, with runs of multiple dashes condensed to a single dash. """ version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_')
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/setuptools/_distutils/command/install_data.py
"""distutils.command.install_data Implements the Distutils 'install_data' command, for installing platform-independent data files.""" # contributed by Bastian Kleineidam import os from distutils.core import Command from distutils.util import change_root, convert_path class install_data(Command): description = "install data files" user_options = [ ('install-dir=', 'd', "base directory for installing data files " "(default: installation base dir)"), ('root=', None, "install everything relative to this alternate root directory"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.outfiles = [] self.root = None self.force = 0 self.data_files = self.distribution.data_files self.warn_dir = 1 def finalize_options(self): self.set_undefined_options('install', ('install_data', 'install_dir'), ('root', 'root'), ('force', 'force'), ) def run(self): self.mkpath(self.install_dir) for f in self.data_files: if isinstance(f, str): # it's a simple file, so copy it f = convert_path(f) if self.warn_dir: self.warn("setup script did not provide a directory for " "'%s' -- installing right in '%s'" % (f, self.install_dir)) (out, _) = self.copy_file(f, self.install_dir) self.outfiles.append(out) else: # it's a tuple with path to install to and a list of files dir = convert_path(f[0]) if not os.path.isabs(dir): dir = os.path.join(self.install_dir, dir) elif self.root: dir = change_root(self.root, dir) self.mkpath(dir) if f[1] == []: # If there are no files listed, the user must be # trying to create an empty directory, so add the # directory to the list of output files. self.outfiles.append(dir) else: # Copy files, adding them to the list of output files. for data in f[1]: data = convert_path(data) (out, _) = self.copy_file(data, dir) self.outfiles.append(out) def get_inputs(self): return self.data_files or [] def get_outputs(self): return self.outfiles
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/__init__.py
# coding: utf-8
"""
Package resource API
--------------------

A resource is a logical file contained within a package, or a logical
subdirectory thereof.  The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is.  Do not use os.path operations to manipulate resource
names being passed into the API.

The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files.  It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""

from __future__ import absolute_import

import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer

try:
    import _imp
except ImportError:
    # Python 3.2 compatibility
    import imp as _imp

try:
    FileExistsError
except NameError:
    FileExistsError = OSError

from pkg_resources.extern import six
from pkg_resources.extern.six.moves import map, filter

# capture these to bypass sandboxing
from os import utime
try:
    from os import mkdir, rename, unlink
    WRITE_SUPPORT = True
except ImportError:
    # no write support, probably under GAE
    WRITE_SUPPORT = False

from os import open as os_open
from os.path import isdir, split

try:
    import importlib.machinery as importlib_machinery
    # access attribute to force import under delayed import mechanisms.
    importlib_machinery.__name__
except ImportError:
    importlib_machinery = None

from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')

__metaclass__ = type


if (3, 0) < sys.version_info < (3, 5):
    raise RuntimeError("Python 3.5 or later is required")

if six.PY2:
    # Those builtin exceptions are only defined in Python 3
    PermissionError = None
    NotADirectoryError = None

# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None


class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
""" def parse_version(v): try: return packaging.version.Version(v) except packaging.version.InvalidVersion: return packaging.version.LegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_' + v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_' + _state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of macOS that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of macOS that we are *running*. To allow usage of packages that explicitly require a newer version of macOS, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) except ValueError: # not macOS pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Warnings 'PkgResourcesDeprecationWarning', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__ + repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. 
Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. """ _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = '{}.{}'.format(*sys.version_info) EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macos_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macos_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and macOS. 
""" from sysconfig import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macos_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % ( int(version[0]), int(version[1]), _macos_arch(machine), ) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided == required: # easy case return True # macOS special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macOS designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macOS or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, six.string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet: """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. 
""" dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ return ( entry for dist in self for entry in dist.get_entry_map(group).values() if name is None or name == entry.name ) def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key] = 1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry, replace=replace) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry, []) keys2 = self.entry_keys.setdefault(dist.location, []) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False, extras=None): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. `extras` is a list of the extras to be used with these requirements. This is important because extra requirements may look like `my_req; extra = "my_extra"`, which would otherwise be interpreted as a purely optional requirement. Instead, we want to be able to assert that these requirements are truly required. 
""" # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] req_extras = _ReqExtras() # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue if not req_extras.markers_pass(req, extras): continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match( req, ws, installer, replace_conflicting=replace_conflicting ) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) req_extras[new_requirement] = req.extras processed[req] = True # return list of distros to activate return to_activate def find_plugins( self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback, existing=True): """Invoke `callback` for all distributions If `existing=True` (default), call on all existing ones, as well. """ if callback in self.callbacks: return self.callbacks.append(callback) if not existing: return for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class _ReqExtras(dict): """ Map each requirement to the extras that demanded it. """ def markers_pass(self, req, extras=None): """ Evaluate markers for req against each extra that demanded it. Return False if the req has a marker and fails evaluation. Otherwise, return True. """ extra_evals = ( req.marker.evaluate({'extra': extra}) for extra in self.get(req, ()) + (extras or (None,)) ) return not req.marker or any(extra_evals) class Environment: """Searchable snapshot of distributions on a search path""" def __init__( self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.6'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. 
""" self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. """ py_compat = ( self.python is None or dist.py_version is None or dist.py_version == self.python ) return py_compat and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match( self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ try: dist = working_set.find(req) except VersionConflict: if not replace_conflicting: raise dist = None if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() tmpl = textwrap.dedent(""" Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name + '-tmp', *names) try: _bypass_ensure_directory(target_path) except Exception: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ( "Extraction path is writable by group/others " "and vulnerable to attack when " "used with get_resource_filename ({path}). " "Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." ).format(**locals()) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """ Return the ``PYTHON_EGG_CACHE`` environment variable or a platform-relevant user cache dir for an app named "Python-Eggs". """ return ( os.environ.get('PYTHON_EGG_CACHE') or appdirs.user_cache_dir(appname='Python-Eggs') ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-', '_') def invalid_marker(text): """ Validate text as a PEP 508 environment marker; return an exception if invalid or False otherwise. """ try: evaluate_marker(text) except SyntaxError as e: e.filename = None e.lineno = None return e return False def evaluate_marker(text, extra=None): """ Evaluate a PEP 508 environment marker. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'pyparsing' module. 
""" try: marker = packaging.markers.Marker(text) return marker.evaluate() except packaging.markers.InvalidMarker as e: raise SyntaxError(e) from e class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def _get_metadata_path(self, name): return self._fn(self.egg_info, name) def has_metadata(self, name): if not self.egg_info: return self.egg_info path = self._get_metadata_path(name) return self._has(path) def get_metadata(self, name): if not self.egg_info: return "" path = self._get_metadata_path(name) value = self._get(path) if six.PY2: return value try: return value.decode('utf-8') except UnicodeDecodeError as exc: # Include the path in the error message to simplify # troubleshooting, and without changing the exception type. exc.reason += ' in {} file at path: {}'.format(name, path) raise def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( "Script {script!r} not found in metadata at {self.egg_info!r}" .format(**locals()), ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): with open(script_filename) as fid: source = fid.read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): self._validate_resource_path(resource_name) if resource_name: return os.path.join(base, *resource_name.split('/')) return base @staticmethod def _validate_resource_path(path): """ Validate the resource paths according to the docs. 
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access >>> warned = getfixture('recwarn') >>> warnings.simplefilter('always') >>> vrp = NullProvider._validate_resource_path >>> vrp('foo/bar.txt') >>> bool(warned) False >>> vrp('../foo/bar.txt') >>> bool(warned) True >>> warned.clear() >>> vrp('/foo/bar.txt') >>> bool(warned) True >>> vrp('foo/../../bar.txt') >>> bool(warned) True >>> warned.clear() >>> vrp('foo/f../bar.txt') >>> bool(warned) False Windows path separators are straight-up disallowed. >>> vrp(r'\\foo/bar.txt') Traceback (most recent call last): ... ValueError: Use of .. or absolute path in a resource path \ is not allowed. >>> vrp(r'C:\\foo/bar.txt') Traceback (most recent call last): ... ValueError: Use of .. or absolute path in a resource path \ is not allowed. Blank values are allowed >>> vrp('') >>> bool(warned) False Non-string values are not. >>> vrp(None) Traceback (most recent call last): ... AttributeError: ... """ invalid = ( os.path.pardir in path.split(posixpath.sep) or posixpath.isabs(path) or ntpath.isabs(path) ) if not invalid: return msg = "Use of .. or absolute path in a resource path is not allowed." # Aggressively disallow Windows absolute paths if ntpath.isabs(path) and not posixpath.isabs(path): raise ValueError(msg) # for compatibility, warn; in future # raise ValueError(msg) warnings.warn( msg[:-1] + " and will raise exceptions in a future release.", DeprecationWarning, stacklevel=4, ) def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) def _parents(path): """ yield all parents of path including path """ last = None while path != last: yield path last = path path, _ = os.path.split(path) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # Assume that metadata may be nested inside a "basket" # of multiple eggs and use module_path instead of .archive. 
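        # Walk upward from module_path (via _parents) and adopt the first
        # ancestor that looks like an egg (path ending in ".egg"), so
        # modules nested inside an egg -- or inside an egg "basket" --
        # all share that egg as their root.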
eggs = filter(_is_egg_path, _parents(self.module_path)) egg = next(eggs, None) egg and self._set_egg(egg) def _set_egg(self, path): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() @classmethod def _register(cls): loader_names = 'SourceFileLoader', 'SourcelessFileLoader', for name in loader_names: loader_cls = getattr(importlib_machinery, name, type(None)) register_loader_type(loader_cls, cls) DefaultProvider._register() class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" module_path = None _isdir = _has = lambda self, path: False def _get(self, path): return '' def _listdir(self, path): return [] def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with zipfile.ZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. """ path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive fspath = fspath.rstrip(os.sep) if fspath == self.loader.archive: return '' if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
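        # Illustrative example: with zip_pre ".../Basket.egg/" and
        # egg_root ".../Basket.egg/Nested.egg", the zip subpath
        # "Nested.egg/data/cfg.ini" maps to ['data', 'cfg.ini'].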
# pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): return fspath[len(self.egg_root) + 1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp( ".$extract", dir=os.path.dirname(real_path), ) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
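
    Example (an illustrative sketch mirroring how
    ``distributions_from_metadata`` uses this class; the path is
    hypothetical)::

        metadata = FileMetadata("/src/proj/PKG-INFO")
        dist = Distribution.from_location(
            "/src/proj", "PKG-INFO", metadata, precedence=DEVELOP_DIST)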
""" def __init__(self, path): self.path = path def _get_metadata_path(self, name): return self.path def has_metadata(self, name): return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): if name != 'PKG-INFO': raise KeyError("No metadata except PKG-INFO is available") with io.open(self.path, encoding='utf-8', errors="replace") as f: metadata = f.read() self._warn_on_replacement(metadata) return metadata def _warn_on_replacement(self, metadata): # Python 2.7 compat for: replacement_char = '�' replacement_char = b'\xef\xbf\xbd'.decode('utf-8') if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" msg = tmpl.format(**locals()) warnings.warn(msg) def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders={}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir(''): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) for dist in dists: yield dist elif subitem.lower().endswith('.dist-info'): subpath = os.path.join(path_item, subitem) submeta = EggMetadata(zipimport.zipimporter(subpath)) submeta.egg_info = subpath yield Distribution.from_location(path_item, subitem, submeta) register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def _by_version_descending(names): """ Given a list of filenames, return them in descending order by version number. >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' >>> _by_version_descending(names) ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' >>> _by_version_descending(names) ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] """ def _by_version(name): """ Parse each component of the filename """ name, ext = os.path.splitext(name) parts = itertools.chain(name.split('-'), [ext]) return [packaging.version.parse(part) for part in parts] return sorted(names, key=_by_version, reverse=True) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item, 'EGG-INFO') ) ) return entries = safe_listdir(path_item) # for performance, before sorting by version, # screen entries for only those that will yield # distributions filtered = ( entry for entry in entries if dist_factory(path_item, entry, only) ) # scan for .egg and .egg-info in directory path_item_entries = _by_version_descending(filtered) for entry in path_item_entries: fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): yield dist def dist_factory(path_item, entry, only): """Return a dist_factory for the given entry.""" lower = entry.lower() is_egg_info = lower.endswith('.egg-info') is_dist_info = ( lower.endswith('.dist-info') and os.path.isdir(os.path.join(path_item, entry)) ) is_meta = is_egg_info or is_dist_info return ( distributions_from_metadata if is_meta else find_distributions if not only and _is_egg_path(entry) else resolve_egg_link if not only and lower.endswith('.egg-link') else NoDists() ) class NoDists: """ >>> bool(NoDists()) False >>> list(NoDists()('anything')) [] """ def __bool__(self): return False if six.PY2: __nonzero__ = __bool__ def __call__(self, fullpath): return iter(()) def safe_listdir(path): """ Attempt to list contents of path, but suppress some exceptions. 
""" try: return os.listdir(path) except (PermissionError, NotADirectoryError): pass except OSError as e: # Ignore the directory if does not exist, not a directory or # permission denied ignorable = ( e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) # Python 2 on Windows needs to be handled this way :( or getattr(e, "winerror", None) == 267 ) if not ignorable: raise return () def distributions_from_metadata(path): root = os.path.dirname(path) if os.path.isdir(path): if len(os.listdir(path)) == 0: # empty metadata dir; skip return metadata = PathMetadata(root, path) else: metadata = FileMetadata(path) entry = os.path.basename(path) yield Distribution.from_location( root, entry, metadata, precedence=DEVELOP_DIST, ) def non_empty_lines(path): """ Yield non-empty lines from file at path """ with open(path) as f: for line in f: line = line.strip() if line: yield line def resolve_egg_link(path): """ Given a path to an .egg-link, resolve distributions present in the referenced path. """ referenced_paths = non_empty_lines(path) resolved_paths = ( os.path.join(os.path.dirname(path), ref) for ref in referenced_paths ) dist_groups = map(find_distributions, resolved_paths) return next(dist_groups, ()) register_finder(pkgutil.ImpImporter, find_on_path) if hasattr(importlib_machinery, 'FileFinder'): register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. """ _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None # use find_spec (PEP 451) and fall-back to find_module (PEP 302) try: loader = importer.find_spec(packageName).loader except AttributeError: # capture warnings due to #1111 with warnings.catch_warnings(): warnings.simplefilter("ignore") loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath def _rebuild_mod_path(orig_path, package_name, module): """ Rebuild module.__path__ ensuring that all entries are ordered corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] def safe_sys_path_index(entry): """ Workaround for #520 and #513. 
""" try: return sys_path.index(entry) except ValueError: return float('inf') def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path """ path_parts = path.split(os.sep) module_parts = package_name.count('.') + 1 parts = path_parts[:-module_parts] return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) new_path = sorted(orig_path, key=position_in_sys_path) new_path = [_normalize_cached(p) for p in new_path] if isinstance(module.__path__, list): module.__path__[:] = new_path else: module.__path__ = new_path def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path = sys.path parent, _, _ = packageName.rpartition('.') if parent: declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError as e: raise TypeError("Not a package:", parent) from e # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent or None, []).append(packageName) _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if hasattr(importlib_machinery, 'FileFinder'): register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath( _cygwin_patch(filename)))) def _cygwin_patch(filename): # pragma: nocover """ Contrary to POSIX 2008, on Cygwin, getcwd (3) contains symlink components. Using os.path.abspath() works around this limitation. A fix in os.getcwd() would probably better, in Cygwin even more so, except that this seems to be by design... """ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _is_egg_path(path): """ Determine if given path appears to be an egg. """ return path.lower().endswith('.egg') def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. 
""" return ( _is_egg_path(path) and os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) ) def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, six.string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint: """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = tuple(extras) self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", PkgResourcesDeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) from exc def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) # Get the requirements for this entry point with all its extras and # then resolve them. We have to pass `extras` along when resolving so # that the working set knows what extras we want. Otherwise, for # dist-info distributions, the working set will assume that the # requirements for that extra are purely optional and skip over them. 
reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer, extras=self.extras) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name] = ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _version_from_file(lines): """ Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. 
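
    Example (illustrative)::

        _version_from_file(['Name: foo', 'Version: 1.2.3'])  # -> '1.2.3'
        _version_from_file(['Name: foo'])                    # -> None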
""" def is_version_line(line): return line.lower().startswith('version:') version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') return safe_version(value.strip()) or None class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__( self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None, **kw): project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name', 'ver', 'pyver', 'plat' ) return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw )._reload_version() def _reload_version(self): return self @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, self.location, self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError as e: version = self._get_version() if version is None: path = self._get_metadata_path_for_display(self.PKG_INFO) msg = ( "Missing 'Version:' header and/or {} file at path: {}" ).format(self.PKG_INFO, path) raise ValueError(msg, self) from e return version @property def _dep_map(self): """ A map of extra to its list of (direct) requirements for this distribution, including the null extra. """ try: return self.__dep_map except AttributeError: self.__dep_map = self._filter_extras(self._build_dep_map()) return self.__dep_map @staticmethod def _filter_extras(dm): """ Given a mapping of extras to dependencies, strip off environment markers and filter out any dependencies not matching the markers. """ for extra in list(filter(None, dm)): new_extra = extra reqs = dm.pop(extra) new_extra, _, marker = extra.partition(':') fails_marker = marker and ( invalid_marker(marker) or not evaluate_marker(marker) ) if fails_marker: reqs = [] new_extra = safe_extra(new_extra) or None dm.setdefault(new_extra, []).extend(reqs) return dm def _build_dep_map(self): dm = {} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): dm.setdefault(extra, []).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError as e: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) from e return deps def _get_metadata_path_for_display(self, name): """ Return the path to the given metadata file, if available. """ try: # We need to access _get_metadata_path() on the provider object # directly rather than through this class's __getattr__() # since _get_metadata_path() is marked private. path = self._provider._get_metadata_path(name) # Handle exceptions e.g. in case the distribution's metadata # provider doesn't support _get_metadata_path(). 
except Exception: return '[could not detect]' return path def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def _get_version(self): lines = self._get_metadata(self.PKG_INFO) version = _version_from_file(lines) return version def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path, replace=replace) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) def __dir__(self): return list( set(super(Distribution, self).__dir__()) | set( attr for attr in self._provider.__dir__() if not attr.startswith('_') ) ) if not hasattr(object, '__dir__'): # python 2.7 not supported del __dir__ @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc=None, replace=False): """Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. 
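
        Example (illustrative; the egg path is hypothetical)::

            dist = Distribution.from_filename('/eggs/MyPkg-1.0-py3.8.egg')
            dist.insert_on(sys.path)                 # passive insertion
            dist.insert_on(sys.path, replace=True)   # take precedence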
""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class EggInfoDistribution(Distribution): def _reload_version(self): """ Packages installed by distutils (e.g. numpy or scipy), which uses an old safe_version, and so their version numbers can get mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). These distributions will not be parsed properly downstream by Distribution and safe_version, so take an extra step and try to get the version number from the metadata file itself instead of the filename. """ md_version = self._get_version() if md_version: self._version = md_version return self class DistInfoDistribution(Distribution): """ Wrap an actual or potential sys.path entry w/metadata, .dist-info style. 
""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _compute_dependencies(self): """Recompute this distribution's dependencies.""" dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: reqs.extend(parse_requirements(req)) def reqs_for_extra(extra): for req in reqs: if not req.marker or req.marker.evaluate({'extra': extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args, **kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) for line in lines: # Drop comments -- a hash without a space may be in a URL. if ' #' in line: line = line[:line.find(' #')] # If there is a line continuation, drop it, and append the next line. if line.endswith('\\'): line = line[:-2].strip() try: line += next(lines) except StopIteration: return yield Requirement(line) class RequirementParseError(packaging.requirements.InvalidRequirement): "Compatibility wrapper for InvalidRequirement" class Requirement(packaging.requirements.Requirement): def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" super(Requirement, self).__init__(requirement_string) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() self.specs = [ (spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, self.url, self.specifier, frozenset(self.extras), str(self.marker) if self.marker else None, ) self.__hash = hash(self.hashCmp) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. 
return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): req, = parse_requirements(s) return req def _always_object(classes): """ Ensure object appears in the mro even for old-style classes. """ if object not in classes: return classes + (object,) return classes def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) for t in types: if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) os.makedirs(dirname, exist_ok=True) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) try: mkdir(dirname, 0o755) except FileExistsError: pass def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager g.update( (name, getattr(manager, name)) for name in dir(manager) if not name.startswith('_') ) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path with replace=False and # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). 
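    # (the tuple() call simply forces the generator so that each
    # distribution's activate() runs now; the resulting tuple is
    # discarded)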
tuple( dist.activate(replace=False) for dist in working_set ) add_activation_listener( lambda dist: dist.activate(replace=True), existing=False, ) working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) class PkgResourcesDeprecationWarning(Warning): """ Base class for warning about deprecations in ``pkg_resources`` This class is not derived from ``DeprecationWarning``, and as such is visible by default. """
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/appdirs.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2005-2010 ActiveState Software Inc. # Copyright (c) 2013 Eddy Petrișor """Utilities for determining application-specific dirs. See <http://github.com/ActiveState/appdirs> for details and usage. """ # Dev Notes: # - MSDN on where to store app data files: # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html __version_info__ = (1, 4, 3) __version__ = '.'.join(map(str, __version_info__)) import sys import os PY3 = sys.version_info[0] == 3 if PY3: unicode = str if sys.platform.startswith('java'): import platform os_name = platform.java_ver()[3][0] if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. system = 'win32' elif os_name.startswith('Mac'): # "Mac OS X", etc. system = 'darwin' else: # "Linux", "SunOS", "FreeBSD", etc. # Setting this to "linux2" is not ideal, but only Windows or Mac # are actually checked for and the rest of the module expects # *sys.platform* style strings. system = 'linux2' else: system = sys.platform def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user data directories are: Mac OS X: ~/Library/Application Support/<AppName> Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/<AppName>". 
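
    Example (illustrative; the exact result depends on the platform)::

        user_data_dir("SuperApp", "Acme")
        # Mac OS X: '/Users/<user>/Library/Application Support/SuperApp'
        # Unix:     '/home/<user>/.local/share/SuperApp'
        # Win 7:    'C:\Users\<user>\AppData\Local\Acme\SuperApp'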
""" if system == "win32": if appauthor is None: appauthor = appname const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.normpath(_get_win_folder(const)) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('~/Library/Application Support/') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/<AppName>', if XDG_DATA_DIRS is not set Typical site data directories are: Mac OS X: /Library/Application Support/<AppName> Unix: /usr/local/share/<AppName> or /usr/share/<AppName> Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('/Library/Application Support') if appname: path = os.path.join(path, appname) else: # XDG default for $XDG_DATA_DIRS # only first, if multipath is False path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share'])) pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path if appname and version: path = os.path.join(path, version) return path def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user config directories are: Mac OS X: same as user_data_dir Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/<AppName>". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set Typical site config directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system in ["win32", "darwin"]: path = site_data_dir(appname, appauthor) if appname and version: path = os.path.join(path, version) else: # XDG default for $XDG_CONFIG_DIRS # only first, if multipath is False path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. 
You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) if opinion: path = os.path.join(path, "Cache") elif system == 'darwin': path = os.path.expanduser('~/Library/Caches') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific state dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user state directories are: Mac OS X: same as user_data_dir Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined Win *: same as user_data_dir For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> to extend the XDG spec and support $XDG_STATE_HOME. That means, by default "~/.local/state/<AppName>". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific log dir for this application. 
"appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user log directories are: Mac OS X: ~/Library/Logs/<AppName> Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. """ if system == "darwin": path = os.path.join( os.path.expanduser('~/Library/Logs'), appname) elif system == "win32": path = user_data_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "Logs") else: path = user_cache_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "log") if appname and version: path = os.path.join(path, version) return path class AppDirs(object): """Convenience wrapper for getting application dirs.""" def __init__(self, appname=None, appauthor=None, version=None, roaming=False, multipath=False): self.appname = appname self.appauthor = appauthor self.version = version self.roaming = roaming self.multipath = multipath @property def user_data_dir(self): return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_data_dir(self): return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_config_dir(self): return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_config_dir(self): return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_cache_dir(self): return user_cache_dir(self.appname, self.appauthor, version=self.version) @property def user_state_dir(self): return user_state_dir(self.appname, self.appauthor, version=self.version) @property def user_log_dir(self): return user_log_dir(self.appname, self.appauthor, version=self.version) #---- internal support stuff def _get_win_folder_from_registry(csidl_name): """This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
""" if PY3: import winreg as _winreg else: import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) dir, type = _winreg.QueryValueEx(key, shell_folder_name) return dir def _get_win_folder_with_pywin32(csidl_name): from win32com.shell import shellcon, shell dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) # Try to make this a unicode path because SHGetFolderPath does # not return unicode strings when there is unicode data in the # path. try: dir = unicode(dir) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: try: import win32api dir = win32api.GetShortPathName(dir) except ImportError: pass except UnicodeError: pass return dir def _get_win_folder_with_ctypes(csidl_name): import ctypes csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def _get_win_folder_with_jna(csidl_name): import array from com.sun import jna from com.sun.jna.platform import win32 buf_size = win32.WinDef.MAX_PATH * 2 buf = array.zeros('c', buf_size) shell = win32.Shell32.INSTANCE shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) dir = jna.Native.toString(buf.tostring()).rstrip("\0") # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: buf = array.zeros('c', buf_size) kernel = win32.Kernel32.INSTANCE if kernel.GetShortPathName(dir, buf, buf_size): dir = jna.Native.toString(buf.tostring()).rstrip("\0") return dir if system == "win32": try: import win32com.shell _get_win_folder = _get_win_folder_with_pywin32 except ImportError: try: from ctypes import windll _get_win_folder = _get_win_folder_with_ctypes except ImportError: try: import com.sun.jna _get_win_folder = _get_win_folder_with_jna except ImportError: _get_win_folder = _get_win_folder_from_registry #---- self test code if __name__ == "__main__": appname = "MyApp" appauthor = "MyCompany" props = ("user_data_dir", "user_config_dir", "user_cache_dir", "user_state_dir", "user_log_dir", "site_data_dir", "site_config_dir") print("-- app dirs %s --" % __version__) print("-- app dirs (with optional 'version')") dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'version')") dirs = AppDirs(appname, appauthor) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'appauthor')") dirs = AppDirs(appname) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (with disabled 'appauthor')") dirs = AppDirs(appname, appauthor=False) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop)))
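# ---- editor's note: minimal usage sketch (not part of appdirs itself) ----
# A hedged example of the public API defined above. The app name "MyTodoApp"
# and author "AcmeCo" are invented for illustration; the printed paths depend
# on the platform. Nothing here creates directories -- appdirs only builds
# path strings.

def _example_usage():
    dirs = AppDirs("MyTodoApp", "AcmeCo", version="1.0")
    # Per-user locations, resolved per the platform rules documented above:
    print("data:   %s" % dirs.user_data_dir)    # e.g. ~/.local/share/MyTodoApp/1.0 on Linux
    print("config: %s" % dirs.user_config_dir)  # e.g. ~/.config/MyTodoApp/1.0 on Linux
    print("cache:  %s" % dirs.user_cache_dir)   # Windows appends "Cache" unless opinion=False
    print("logs:   %s" % dirs.user_log_dir)     # Unix appends "log" to the cache dir
    # Site-wide (shared) locations; note the Vista warning in site_data_dir:
    print("site data:   %s" % dirs.site_data_dir)
    print("site config: %s" % dirs.site_config_dir)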
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/six.py
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", 
"tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes 
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects 
in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = 
operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
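# ---- editor's note: minimal usage sketch (not part of six itself) --------
# A hedged example of the helpers defined above; the class names below are
# invented for illustration. Everything referenced (with_metaclass, iteritems,
# b, u, print_, moves) is defined earlier in this module, so this behaves the
# same way under Python 2 and Python 3.

def _example_usage():
    # One class body that receives a custom metaclass on both majors:
    class Meta(type):
        pass

    class Base(with_metaclass(Meta, object)):
        pass

    assert type(Base) is Meta

    # Version-neutral literals and dict iteration:
    raw = b("payload")   # bytes on both majors
    text = u("payload")  # text (unicode/str) on both majors
    for key, value in iteritems({"bytes": raw, "text": text}):
        print_(key, type(value))

    # Renamed stdlib objects resolve lazily through the `moves` namespace:
    buf = moves.StringIO()
    buf.write(text)
    print_(buf.getvalue())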
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/pyparsing.py
# module pyparsing.py # # Copyright (c) 2003-2018 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __doc__ = \ """ pyparsing module - Classes and methods to define and execute parsing grammars ============================================================================= The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements (L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to L{Literal} expressions):: from pyparsing import Word, alphas # define grammar of a greeting greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) The program outputs the following:: Hello, World! -> ['Hello', ',', 'World', '!'] The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments Getting Started - ----------------- Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing classes inherit from. 
Use the docstrings for examples of how to: - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes - construct character word-group expressions using the L{Word} class - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones - associate names with your parsed results using L{ParserElement.setResultsName} - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} - find more useful common expressions in the L{pyparsing_common} namespace class """ __version__ = "2.2.1" __versionTime__ = "18 Sep 2018 00:49 UTC" __author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" import string from weakref import ref as wkref import copy import sys import warnings import re import sre_constants import collections import pprint import traceback import types from datetime import datetime try: from _thread import RLock except ImportError: from threading import RLock try: # Python 3 from collections.abc import Iterable from collections.abc import MutableMapping except ImportError: # Python 2.7 from collections import Iterable from collections import MutableMapping try: from collections import OrderedDict as _OrderedDict except ImportError: try: from ordereddict import OrderedDict as _OrderedDict except ImportError: _OrderedDict = None #~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) __all__ = [ 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', 'CloseMatch', 'tokenMap', 'pyparsing_common', ] system_version = tuple(sys.version_info)[:3] PY_3 = system_version[0] == 3 if PY_3: _MAX_INT = sys.maxsize basestring = str unichr = chr _ustr = str # build list of single arg builtins, that can be used as parse actions singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] else: _MAX_INT = sys.maxint range = xrange def _ustr(obj): """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries str(obj). 
If that fails with a UnicodeEncodeError, then it tries unicode(obj). It then < returns the unicode object | encodes it with the default encoding | ... >. """ if isinstance(obj,unicode): return obj try: # If this works, then _ustr(obj) has the same behaviour as str(obj), so # it won't break any existing code. return str(obj) except UnicodeEncodeError: # Else encode it ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') xmlcharref = Regex(r'&#\d+;') xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) return xmlcharref.transformString(ret) # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] import __builtin__ for fname in "sum len sorted reversed list tuple set any all min max".split(): try: singleArgBuiltins.append(getattr(__builtin__,fname)) except AttributeError: continue _generatorType = type((y for y in range(1))) def _xml_escape(data): """Escape &, <, >, ", ', etc. in a string of data.""" # ampersand must be replaced first from_symbols = '&><"\'' to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) for from_,to_ in zip(from_symbols, to_symbols): data = data.replace(from_, to_) return data class _Constants(object): pass alphas = string.ascii_uppercase + string.ascii_lowercase nums = "0123456789" hexnums = nums + "ABCDEFabcdef" alphanums = alphas + nums _bslash = chr(92) printables = "".join(c for c in string.printable if c not in string.whitespace) class ParseBaseException(Exception): """base exception class for all parsing runtime exceptions""" # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__( self, pstr, loc=0, msg=None, elem=None ): self.loc = loc if msg is None: self.msg = pstr self.pstr = "" else: self.msg = msg self.pstr = pstr self.parserElement = elem self.args = (pstr, loc, msg) @classmethod def _from_exception(cls, pe): """ internal factory method to simplify creating one type of ParseException from another - avoids having __init__ signature conflicts among subclasses """ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) def __getattr__( self, aname ): """supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text """ if( aname == "lineno" ): return lineno( self.loc, self.pstr ) elif( aname in ("col", "column") ): return col( self.loc, self.pstr ) elif( aname == "line" ): return line( self.loc, self.pstr ) else: raise AttributeError(aname) def __str__( self ): return "%s (at char %d), (line:%d, col:%d)" % \ ( self.msg, self.loc, self.lineno, self.column ) def __repr__( self ): return _ustr(self) def markInputline( self, markerString = ">!<" ): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. 
""" line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join((line_str[:line_column], markerString, line_str[line_column:])) return line_str.strip() def __dir__(self): return "lineno col line".split() + dir(type(self)) class ParseException(ParseBaseException): """ Exception thrown when parse expressions don't match class; supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text Example:: try: Word(nums).setName("integer").parseString("ABC") except ParseException as pe: print(pe) print("column: {}".format(pe.col)) prints:: Expected integer (at char 0), (line:1, col:1) column: 1 """ pass class ParseFatalException(ParseBaseException): """user-throwable exception thrown when inconsistent parse content is found; stops all parsing immediately""" pass class ParseSyntaxException(ParseFatalException): """just like L{ParseFatalException}, but thrown internally when an L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop immediately because an unbacktrackable syntax error has been found""" pass #~ class ReparseException(ParseBaseException): #~ """Experimental class - parse actions can raise this exception to cause #~ pyparsing to reparse the input string: #~ - with a modified input string, and/or #~ - with a modified start location #~ Set the values of the ReparseException in the constructor, and raise the #~ exception in a parse action to cause pyparsing to use the new string/location. #~ Setting the values as None causes no change to be made. #~ """ #~ def __init_( self, newstring, restartLoc ): #~ self.newParseText = newstring #~ self.reparseLoc = restartLoc class RecursiveGrammarException(Exception): """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" def __init__( self, parseElementList ): self.parseElementTrace = parseElementList def __str__( self ): return "RecursiveGrammarException: %s" % self.parseElementTrace class _ParseResultsWithOffset(object): def __init__(self,p1,p2): self.tup = (p1,p2) def __getitem__(self,i): return self.tup[i] def __repr__(self): return repr(self.tup[0]) def setOffset(self,i): self.tup = (self.tup[0],i) class ParseResults(object): """ Structured parse results, to provide multiple means of access to the parsed data: - as a list (C{len(results)}) - by list index (C{results[0], results[1]}, etc.) 
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) Example:: integer = Word(nums) date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") # parseString returns a ParseResults object result = date_str.parseString("1999/12/31") def test(s, fn=repr): print("%s -> %s" % (s, fn(eval(s)))) test("list(result)") test("result[0]") test("result['month']") test("result.day") test("'month' in result") test("'minutes' in result") test("result.dump()", str) prints:: list(result) -> ['1999', '/', '12', '/', '31'] result[0] -> '1999' result['month'] -> '12' result.day -> '31' 'month' in result -> True 'minutes' in result -> False result.dump() -> ['1999', '/', '12', '/', '31'] - day: 31 - month: 12 - year: 1999 """ def __new__(cls, toklist=None, name=None, asList=True, modal=True ): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) retobj.__doinit = True return retobj # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): if self.__doinit: self.__doinit = False self.__name = None self.__parent = None self.__accumNames = {} self.__asList = asList self.__modal = modal if toklist is None: toklist = [] if isinstance(toklist, list): self.__toklist = toklist[:] elif isinstance(toklist, _generatorType): self.__toklist = list(toklist) else: self.__toklist = [toklist] self.__tokdict = dict() if name is not None and name: if not modal: self.__accumNames[name] = 0 if isinstance(name,int): name = _ustr(name) # will always return a str, but use _ustr for consistency self.__name = name if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): if isinstance(toklist,basestring): toklist = [ toklist ] if asList: if isinstance(toklist,ParseResults): self[name] = _ParseResultsWithOffset(toklist.copy(),0) else: self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) self[name].__name = name else: try: self[name] = toklist[0] except (KeyError,TypeError,IndexError): self[name] = toklist def __getitem__( self, i ): if isinstance( i, (int,slice) ): return self.__toklist[i] else: if i not in self.__accumNames: return self.__tokdict[i][-1][0] else: return ParseResults([ v[0] for v in self.__tokdict[i] ]) def __setitem__( self, k, v, isinstance=isinstance ): if isinstance(v,_ParseResultsWithOffset): self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] sub = v[0] elif isinstance(k,(int,slice)): self.__toklist[k] = v sub = v else: self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] sub = v if isinstance(sub,ParseResults): sub.__parent = wkref(self) def __delitem__( self, i ): if isinstance(i,(int,slice)): mylen = len( self.__toklist ) del self.__toklist[i] # convert int to slice if isinstance(i, int): if i < 0: i += mylen i = slice(i, i+1) # get removed indices removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary for name,occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) else: del self.__tokdict[i] def __contains__( self, k ): return k in self.__tokdict def __len__( self ): return len( self.__toklist ) def __bool__(self): return ( 
not not self.__toklist ) __nonzero__ = __bool__ def __iter__( self ): return iter( self.__toklist ) def __reversed__( self ): return iter( self.__toklist[::-1] ) def _iterkeys( self ): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) def _itervalues( self ): return (self[k] for k in self._iterkeys()) def _iteritems( self ): return ((k, self[k]) for k in self._iterkeys()) if PY_3: keys = _iterkeys """Returns an iterator of all named result keys (Python 3.x only).""" values = _itervalues """Returns an iterator of all named result values (Python 3.x only).""" items = _iteritems """Returns an iterator of all named result key-value tuples (Python 3.x only).""" else: iterkeys = _iterkeys """Returns an iterator of all named result keys (Python 2.x only).""" itervalues = _itervalues """Returns an iterator of all named result values (Python 2.x only).""" iteritems = _iteritems """Returns an iterator of all named result key-value tuples (Python 2.x only).""" def keys( self ): """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) def values( self ): """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) def items( self ): """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) def haskeys( self ): """Since keys() returns an iterator, this method is helpful in bypassing code that looks for the existence of any defined results names.""" return bool(self.__tokdict) def pop( self, *args, **kwargs): """ Removes and returns item at specified index (default=C{last}). Supports both C{list} and C{dict} semantics for C{pop()}. If passed no argument or an integer argument, it will use C{list} semantics and pop tokens from the list of parsed tokens. If passed a non-integer argument (most likely a string), it will use C{dict} semantics and pop the corresponding value from any defined results names. A second default return value argument is supported, just as in C{dict.pop()}. Example:: def remove_first(tokens): tokens.pop(0) print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] label = Word(alphas) patt = label("LABEL") + OneOrMore(Word(nums)) print(patt.parseString("AAB 123 321").dump()) # Use pop() in a parse action to remove named result (note that corresponding value is not # removed from list form of results) def remove_LABEL(tokens): tokens.pop("LABEL") return tokens patt.addParseAction(remove_LABEL) print(patt.parseString("AAB 123 321").dump()) prints:: ['AAB', '123', '321'] - LABEL: AAB ['AAB', '123', '321'] """ if not args: args = [-1] for k,v in kwargs.items(): if k == 'default': args = (args[0], v) else: raise TypeError("pop() got an unexpected keyword argument '%s'" % k) if (isinstance(args[0], int) or len(args) == 1 or args[0] in self): index = args[0] ret = self[index] del self[index] return ret else: defaultvalue = args[1] return defaultvalue def get(self, key, defaultValue=None): """ Returns named result matching the given key, or if there is no such name, then returns the given C{defaultValue} or C{None} if no C{defaultValue} is specified. Similar to C{dict.get()}. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString("1999/12/31") print(result.get("year")) # -> '1999' print(result.get("hour", "not specified")) # -> 'not specified' print(result.get("hour")) # -> None """ if key in self: return self[key] else: return defaultValue def insert( self, index, insStr ): """ Inserts new element at location index in the list of parsed tokens. Similar to C{list.insert()}. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary for name,occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) def append( self, item ): """ Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] """ self.__toklist.append(item) def extend( self, itemseq ): """ Add sequence of elements to end of ParseResults list of elements. Example:: patt = OneOrMore(Word(alphas)) # use a parse action to append the reverse of the matched strings, to make a palindrome def make_palindrome(tokens): tokens.extend(reversed([t[::-1] for t in tokens])) return ''.join(tokens) print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' """ if isinstance(itemseq, ParseResults): self += itemseq else: self.__toklist.extend(itemseq) def clear( self ): """ Clear all elements and results names. 
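
        A minimal sketch (illustrative names)::

            result = Word(nums)("year").parseString("1999")
            result.clear()
            print(result)              # -> []
            print(result.get("year"))  # -> None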
""" del self.__toklist[:] self.__tokdict.clear() def __getattr__( self, name ): try: return self[name] except KeyError: return "" if name in self.__tokdict: if name not in self.__accumNames: return self.__tokdict[name][-1][0] else: return ParseResults([ v[0] for v in self.__tokdict[name] ]) else: return "" def __add__( self, other ): ret = self.copy() ret += other return ret def __iadd__( self, other ): if other.__tokdict: offset = len(self.__toklist) addoffset = lambda a: offset if a<0 else a+offset otheritems = other.__tokdict.items() otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) for (k,vlist) in otheritems for v in vlist] for k,v in otherdictitems: self[k] = v if isinstance(v[0],ParseResults): v[0].__parent = wkref(self) self.__toklist += other.__toklist self.__accumNames.update( other.__accumNames ) return self def __radd__(self, other): if isinstance(other,int) and other == 0: # useful for merging many ParseResults using sum() builtin return self.copy() else: # this may raise a TypeError - so be it return other + self def __repr__( self ): return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) def __str__( self ): return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' def _asStringList( self, sep='' ): out = [] for item in self.__toklist: if out and sep: out.append(sep) if isinstance( item, ParseResults ): out += item._asStringList() else: out.append( _ustr(item) ) return out def asList( self ): """ Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] """ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] def asDict( self ): """ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} """ if PY_3: item_fn = self.items else: item_fn = self.iteritems def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): return obj.asDict() else: return [toItem(v) for v in obj] else: return obj return dict((k,toItem(v)) for k,v in item_fn()) def copy( self ): """ Returns a new copy of a C{ParseResults} object. 
""" ret = ParseResults( self.__toklist ) ret.__tokdict = self.__tokdict.copy() ret.__parent = self.__parent ret.__accumNames.update( self.__accumNames ) ret.__name = self.__name return ret def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): """ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. """ nl = "\n" out = [] namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() for v in vlist) nextLevelIndent = indent + " " # collapse out indents if formatting is not desired if not formatted: indent = "" nextLevelIndent = "" nl = "" selfTag = None if doctag is not None: selfTag = doctag else: if self.__name: selfTag = self.__name if not selfTag: if namedItemsOnly: return "" else: selfTag = "ITEM" out += [ nl, indent, "<", selfTag, ">" ] for i,res in enumerate(self.__toklist): if isinstance(res,ParseResults): if i in namedItems: out += [ res.asXML(namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: out += [ res.asXML(None, namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: # individual token, see if there is a name for it resTag = None if i in namedItems: resTag = namedItems[i] if not resTag: if namedItemsOnly: continue else: resTag = "ITEM" xmlBodyText = _xml_escape(_ustr(res)) out += [ nl, nextLevelIndent, "<", resTag, ">", xmlBodyText, "</", resTag, ">" ] out += [ nl, indent, "</", selfTag, ">" ] return "".join(out) def __lookup(self,sub): for k,vlist in self.__tokdict.items(): for v,loc in vlist: if sub is v: return k return None def getName(self): r""" Returns the results name for this token expression. Useful when several different expressions might match at a particular location. Example:: integer = Word(nums) ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") house_number_expr = Suppress('#') + Word(nums, alphanums) user_data = (Group(house_number_expr)("house_number") | Group(ssn_expr)("ssn") | Group(integer)("age")) user_info = OneOrMore(user_data) result = user_info.parseString("22 111-22-3333 #221B") for item in result: print(item.getName(), ':', item[0]) prints:: age : 22 ssn : 111-22-3333 house_number : 221B """ if self.__name: return self.__name elif self.__parent: par = self.__parent() if par: return par.__lookup(self) else: return None elif (len(self) == 1 and len(self.__tokdict) == 1 and next(iter(self.__tokdict.values()))[0][1] in (0,-1)): return next(iter(self.__tokdict.keys())) else: return None def dump(self, indent='', depth=0, full=True): """ Diagnostic method for listing out the contents of a C{ParseResults}. Accepts an optional C{indent} argument so that this string can be embedded in a nested display of other data. 
Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(result.dump()) prints:: ['12', '/', '31', '/', '1999'] - day: 1999 - month: 31 - year: 12 """ out = [] NL = '\n' out.append( indent+_ustr(self.asList()) ) if full: if self.haskeys(): items = sorted((str(k), v) for k,v in self.items()) for k,v in items: if out: out.append(NL) out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) if isinstance(v,ParseResults): if v: out.append( v.dump(indent,depth+1) ) else: out.append(_ustr(v)) else: out.append(repr(v)) elif any(isinstance(vv,ParseResults) for vv in self): v = self for i,vv in enumerate(v): if isinstance(vv,ParseResults): out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) else: out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) return "".join(out) def pprint(self, *args, **kwargs): """ Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] """ pprint.pprint(self.asList(), *args, **kwargs) # add support for pickle protocol def __getstate__(self): return ( self.__toklist, ( self.__tokdict.copy(), self.__parent is not None and self.__parent() or None, self.__accumNames, self.__name ) ) def __setstate__(self,state): self.__toklist = state[0] (self.__tokdict, par, inAccumNames, self.__name) = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: self.__parent = wkref(par) else: self.__parent = None def __getnewargs__(self): return self.__toklist, self.__name, self.__asList, self.__modal def __dir__(self): return (dir(type(self)) + list(self.keys())) MutableMapping.register(ParseResults) def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) def lineno(loc,strg): """Returns current line number within a string, counting newlines as line separators. The first line is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ return strg.count("\n",0,loc) + 1 def line( loc, strg ): """Returns the line of text containing loc within a string, counting newlines as line separators. 
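
    A minimal sketch, shown together with the companion C{col} and C{lineno}
    helpers (illustrative values)::

        s = "abc\ndef"
        print(lineno(5, s))  # -> 2
        print(col(5, s))     # -> 2
        print(line(5, s))    # -> 'def'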
""" lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: return strg[lastCR+1:nextCR] else: return strg[lastCR+1:] def _defaultStartDebugAction( instring, loc, expr ): print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) def _defaultExceptionDebugAction( instring, loc, expr, exc ): print ("Exception raised:" + _ustr(exc)) def nullDebugAction(*args): """'Do-nothing' debug action, to suppress debugging output during parsing.""" pass # Only works on Python 3.x - nonlocal is toxic to Python 2 installs #~ 'decorator to trim function calls to match the arity of the target' #~ def _trim_arity(func, maxargs=3): #~ if func in singleArgBuiltins: #~ return lambda s,l,t: func(t) #~ limit = 0 #~ foundArity = False #~ def wrapper(*args): #~ nonlocal limit,foundArity #~ while 1: #~ try: #~ ret = func(*args[limit:]) #~ foundArity = True #~ return ret #~ except TypeError: #~ if limit == maxargs or foundArity: #~ raise #~ limit += 1 #~ continue #~ return wrapper # this version is Python 2.x-3.x cross-compatible 'decorator to trim function calls to match the arity of the target' def _trim_arity(func, maxargs=2): if func in singleArgBuiltins: return lambda s,l,t: func(t) limit = [0] foundArity = [False] # traceback return data structure changed in Py3.5 - normalize back to plain tuples if system_version[:2] >= (3,5): def extract_stack(limit=0): # special handling for Python 3.5.0 - extra deep call stack by 1 offset = -3 if system_version == (3,5,0) else -2 frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] return [frame_summary[:2]] def extract_tb(tb, limit=0): frames = traceback.extract_tb(tb, limit=limit) frame_summary = frames[-1] return [frame_summary[:2]] else: extract_stack = traceback.extract_stack extract_tb = traceback.extract_tb # synthesize what would be returned by traceback.extract_stack at the call to # user's parse action 'func', so that we don't incur call penalty at parse time LINE_DIFF = 6 # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
this_line = extract_stack(limit=2)[-1] pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) def wrapper(*args): while 1: try: ret = func(*args[limit[0]:]) foundArity[0] = True return ret except TypeError: # re-raise TypeErrors if they did not come from our arity testing if foundArity[0]: raise else: try: tb = sys.exc_info()[-1] if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: raise finally: del tb if limit[0] <= maxargs: limit[0] += 1 continue raise # copy func name to wrapper for sensible debug output func_name = "<parse action>" try: func_name = getattr(func, '__name__', getattr(func, '__class__').__name__) except Exception: func_name = str(func) wrapper.__name__ = func_name return wrapper class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" verbose_stacktrace = False @staticmethod def setDefaultWhitespaceChars( chars ): r""" Overrides the default whitespace chars Example:: # default whitespace chars are space, <TAB> and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] """ ParserElement.DEFAULT_WHITE_CHARS = chars @staticmethod def inlineLiteralsUsing(cls): """ Set class to be used for inclusion of string literals into a parser. Example:: # default literal class used is Literal integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] # change to Suppress ParserElement.inlineLiteralsUsing(Suppress) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] """ ParserElement._literalStringClass = cls def __init__( self, savelist=False ): self.parseAction = list() self.failAction = None #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall self.strRepr = None self.resultsName = None self.saveAsList = savelist self.skipWhitespace = True self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS self.copyDefaultWhiteChars = True self.mayReturnEmpty = False # used when checking for left-recursion self.keepTabs = False self.ignoreExprs = list() self.debug = False self.streamlined = False self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index self.errmsg = "" self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) self.debugActions = ( None, None, None ) #custom debug actions self.re = None self.callPreparse = True # used to avoid redundant calls to preParse self.callDuringTry = False def copy( self ): """ Make a copy of this C{ParserElement}. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. 
Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of C{expr.copy()} is just C{expr()}:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") """ cpy = copy.copy( self ) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy def setName( self, name ): """ Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) """ self.name = name self.errmsg = "Expected " + self.name if hasattr(self,"exception"): self.exception.msg = self.errmsg return self def setResultsName( self, name, listAllMatches=False ): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ newself = self.copy() if name.endswith("*"): name = name[:-1] listAllMatches=True newself.resultsName = name newself.modalResults = not listAllMatches return newself def setBreak(self,breakFlag = True): """Method to invoke the Python pdb debugger when this element is about to be parsed. Set C{breakFlag} to True to enable, False to disable. """ if breakFlag: _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb pdb.set_trace() return _parseMethod( instring, loc, doActions, callPreParse ) breaker._originalParseMethod = _parseMethod self._parse = breaker else: if hasattr(self._parse,"_originalParseMethod"): self._parse = self._parse._originalParseMethod return self def setParseAction( self, *fns, **kwargs ): """ Define one or more actions to perform when successfully matching parse element definition. Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - s = the original string being parsed (see note below) - loc = the location of the matching substring - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object If the functions in fns modify the tokens, they can return them as the return value from fn, and the modified list of tokens will replace the original. Otherwise, fn does not need to return any value. Optional keyword arguments: - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. 
        See L{I{parseString}<parseString>} for more information
        on parsing strings containing C{<TAB>}s, and suggested
        methods to maintain a consistent view of the parsed string, the parse
        location, and line and column positions within the parsed string.

        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer

            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
        """
        self.parseAction = list(map(_trim_arity, list(fns)))
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self

    def addParseAction( self, *fns, **kwargs ):
        """
        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.

        See examples in L{I{copy}<copy>}.
        """
        self.parseAction += list(map(_trim_arity, list(fns)))
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def addCondition(self, *fns, **kwargs):
        """Add a boolean predicate function to expression's list of parse actions. See
        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
        functions passed to C{addCondition} need to return boolean success/fail of the condition.

        Optional keyword arguments:
         - message = define a custom message to be used in the raised exception
         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException

        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            year_int = integer.copy()
            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
            date_str = year_int + '/' + integer + '/' + integer

            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
        """
        msg = kwargs.get("message", "failed user-defined condition")
        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
        for fn in fns:
            def pa(s,l,t,fn=fn):
                # bind fn eagerly (default argument), so that multiple
                # conditions do not all end up testing the last fn
                if not bool(_trim_arity(fn)(s,l,t)):
                    raise exc_type(s,l,msg)
            self.parseAction.append(pa)
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self

    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
        C{fn(s,loc,expr,err)} where:
         - s = string being parsed
         - loc = location where expression match was attempted and failed
         - expr = the parse expression that failed
         - err = the exception thrown
        The function returns no value.
It may throw C{L{ParseFatalException}} if it is desired to stop parsing immediately.""" self.failAction = fn return self def _skipIgnorables( self, instring, loc ): exprsFound = True while exprsFound: exprsFound = False for e in self.ignoreExprs: try: while 1: loc,dummy = e._parse( instring, loc ) exprsFound = True except ParseException: pass return loc def preParse( self, instring, loc ): if self.ignoreExprs: loc = self._skipIgnorables( instring, loc ) if self.skipWhitespace: wt = self.whiteChars instrlen = len(instring) while loc < instrlen and instring[loc] in wt: loc += 1 return loc def parseImpl( self, instring, loc, doActions=True ): return loc, [] def postParse( self, instring, loc, tokenlist ): return tokenlist #~ @profile def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): debugging = ( self.debug ) #and doActions ) if debugging or self.failAction: #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) if (self.debugActions[0] ): self.debugActions[0]( instring, loc, self ) if callPreParse and self.callPreparse: preloc = self.preParse( instring, loc ) else: preloc = loc tokensStart = preloc try: try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: raise ParseException( instring, len(instring), self.errmsg, self ) except ParseBaseException as err: #~ print ("Exception raised:", err) if self.debugActions[2]: self.debugActions[2]( instring, tokensStart, self, err ) if self.failAction: self.failAction( instring, tokensStart, self, err ) raise else: if callPreParse and self.callPreparse: preloc = self.preParse( instring, loc ) else: preloc = loc tokensStart = preloc if self.mayIndexError or preloc >= len(instring): try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: raise ParseException( instring, len(instring), self.errmsg, self ) else: loc,tokens = self.parseImpl( instring, preloc, doActions ) tokens = self.postParse( instring, loc, tokens ) retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) if self.parseAction and (doActions or self.callDuringTry): if debugging: try: for fn in self.parseAction: tokens = fn( instring, tokensStart, retTokens ) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) except ParseBaseException as err: #~ print "Exception raised in user parse action:", err if (self.debugActions[2] ): self.debugActions[2]( instring, tokensStart, self, err ) raise else: for fn in self.parseAction: tokens = fn( instring, tokensStart, retTokens ) if tokens is not None: retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) if debugging: #~ print ("Matched",self,"->",retTokens.asList()) if (self.debugActions[1] ): self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) return loc, retTokens def tryParse( self, instring, loc ): try: return self._parse( instring, loc, doActions=False )[0] except ParseFatalException: raise ParseException( instring, loc, self.errmsg, self) def canParseNext(self, instring, loc): try: self.tryParse(instring, loc) except (ParseException, IndexError): return False else: return True class _UnboundedCache(object): def __init__(self): cache = {} self.not_in_cache = not_in_cache = object() def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): 
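                # 'cache' is a closure variable from the enclosing __init__, so
                # get/set avoid any per-instance attribute lookup on self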
cache[key] = value def clear(self): cache.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) if _OrderedDict is not None: class _FifoCache(object): def __init__(self, size): self.not_in_cache = not_in_cache = object() cache = _OrderedDict() def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): cache[key] = value while len(cache) > size: try: cache.popitem(False) except KeyError: pass def clear(self): cache.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) else: class _FifoCache(object): def __init__(self, size): self.not_in_cache = not_in_cache = object() cache = {} key_fifo = collections.deque([], size) def get(self, key): return cache.get(key, not_in_cache) def set(self, key, value): cache[key] = value while len(key_fifo) > size: cache.pop(key_fifo.popleft(), None) key_fifo.append(key) def clear(self): cache.clear() key_fifo.clear() def cache_len(self): return len(cache) self.get = types.MethodType(get, self) self.set = types.MethodType(set, self) self.clear = types.MethodType(clear, self) self.__len__ = types.MethodType(cache_len, self) # argument cache for optimizing repeated calls when backtracking through recursive expressions packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail packrat_cache_lock = RLock() packrat_cache_stats = [0, 0] # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): HIT, MISS = 0, 1 lookup = (self, instring, loc, callPreParse, doActions) with ParserElement.packrat_cache_lock: cache = ParserElement.packrat_cache value = cache.get(lookup) if value is cache.not_in_cache: ParserElement.packrat_cache_stats[MISS] += 1 try: value = self._parseNoCache(instring, loc, doActions, callPreParse) except ParseBaseException as pe: # cache a copy of the exception, without the traceback cache.set(lookup, pe.__class__(*pe.args)) raise else: cache.set(lookup, (value[0], value[1].copy())) return value else: ParserElement.packrat_cache_stats[HIT] += 1 if isinstance(value, Exception): raise value return (value[0], value[1].copy()) _parse = _parseNoCache @staticmethod def resetCache(): ParserElement.packrat_cache.clear() ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) _packratEnabled = False @staticmethod def enablePackrat(cache_size_limit=128): """Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. Parameters: - cache_size_limit - (default=C{128}) - if an integer value is provided will limit the size of the packrat cache; if None is passed, then the cache size will be unbounded; if 0 is passed, the cache will be effectively disabled. This speedup may break existing programs that use parse actions that have side-effects. 
        For this reason, packrat parsing is disabled when you first import pyparsing.  To
        activate the packrat feature, your program must call the class method
        C{ParserElement.enablePackrat()}.  If your program uses C{psyco} to "compile as
        you go", you must call C{enablePackrat} before calling C{psyco.full()}.  If you
        do not do this, Python will crash.  For best results, call C{enablePackrat()}
        immediately after importing pyparsing.

        Example::
            import pyparsing
            pyparsing.ParserElement.enablePackrat()
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            ParserElement._parse = ParserElement._parseCache

    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - defining your parse action using the full C{(s,loc,toks)} signature, and
           referencing the input string using the parse action's C{s} argument
         - explicitly expanding the tabs in your input string before calling
           C{parseString}

        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.
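
        For instance, with C{overlap=True} scanning resumes just past the start of each
        match rather than at its end, so matches nested inside other matches are also
        reported. A small sketch (positions shown are indicative)::

            for tokens,start,end in Literal("aba").scanString("ababa", overlap=True):
                print(start, end)  # -> 0 3, then 2 5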
Example:: source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" print(source) for tokens,start,end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd """ if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn( instring, loc ) nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) except ParseException: loc = preloc+1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn( instring, loc ) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc+1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def transformString( self, instring ): """ Extension to C{L{scanString}}, to modify matching text with modified tokens that may be returned from a parse action. To use C{transformString}, define a grammar and attach a parse action to it that modifies the returned token list. Invoking C{transformString()} on a target string will then scan for matches, and replace the matched text patterns according to the logic in the parse action. C{transformString()} returns the resulting transformed string. Example:: wd = Word(alphas) wd.setParseAction(lambda toks: toks[0].title()) print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) Prints:: Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. """ out = [] lastE = 0 # force preservation of <TAB>s, to minimize unwanted transformation of string, and to # keep string locs straight between transformString and scanString self.keepTabs = True try: for t,s,e in self.scanString( instring ): out.append( instring[lastE:s] ) if t: if isinstance(t,ParseResults): out += t.asList() elif isinstance(t,list): out += t else: out.append(t) lastE = e out.append(instring[lastE:]) out = [o for o in out if o] return "".join(map(_ustr,_flatten(out))) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def searchString( self, instring, maxMatches=_MAX_INT ): """ Another extension to C{L{scanString}}, simplifying the access to the tokens found to match the given parse expression. May be called with optional C{maxMatches} argument, to clip searching after 'n' matches are found. 
Example:: # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters cap_word = Word(alphas.upper(), alphas.lower()) print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) # the sum() builtin can be used to merge results into a single ParseResults object print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) prints:: [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): """ Generator method to split a string using the given expression as a separator. May be called with optional C{maxsplit} argument, to limit the number of splits; and the optional C{includeSeparators} argument (default=C{False}), if the separating matching text should be included in the split results. Example:: punc = oneOf(list(".,;:/-!?")) print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) prints:: ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] """ splits = 0 last = 0 for t,s,e in self.scanString(instring, maxMatches=maxsplit): yield instring[last:s] if includeSeparators: yield t[0] last = e yield instring[last:] def __add__(self, other ): """ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement converts them to L{Literal}s by default. Example:: greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" print (hello, "->", greet.parseString(hello)) Prints:: Hello, World! -> ['Hello', ',', 'World', '!'] """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return And( [ self, other ] ) def __radd__(self, other ): """ Implementation of + operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other + self def __sub__(self, other): """ Implementation of - operator, returns C{L{And}} with error stop """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return self + And._ErrorStop() + other def __rsub__(self, other ): """ Implementation of - operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other - self def __mul__(self,other): """ Implementation of * operator, allows use of C{expr * 3} in place of C{expr + expr + expr}. 
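        A minimal sketch::

            ab = Literal("ab")
            print((ab * 3).parseString("ababab"))  # -> ['ab', 'ab', 'ab']
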
        Expressions may also be multiplied by a 2-integer tuple, similar to
        C{{min,max}} multipliers in regular expressions.  Tuples may also include
        C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + L{ZeroOrMore}(expr)}
              (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        """
        if isinstance(other,int):
            minElements, optElements = other,0
        elif isinstance(other,tuple):
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0],int) and other[1] is None:
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self*other[0] + ZeroOrMore(self)
            elif isinstance(other[0],int) and isinstance(other[1],int):
                minElements, optElements = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))

        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater than or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

        if (optElements):
            def makeOptionalList(n):
                if n>1:
                    return Optional(self + makeOptionalList(n-1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self]*minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self]*minElements)
        return ret

    def __rmul__(self, other):
        return self.__mul__(other)

    def __or__(self, other ):
        """
        Implementation of | operator - returns C{L{MatchFirst}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return MatchFirst( [ self, other ] )

    def __ror__(self, other ):
        """
        Implementation of | operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return other | self

    def __xor__(self, other ):
        """
        Implementation of ^ operator - returns C{L{Or}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                    SyntaxWarning, stacklevel=2)
            return None
        return Or( [ self, other ] )

    def __rxor__(self, other ):
        """
        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
        """
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other ^ self def __and__(self, other ): """ Implementation of & operator - returns C{L{Each}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return Each( [ self, other ] ) def __rand__(self, other ): """ Implementation of & operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other & self def __invert__( self ): """ Implementation of ~ operator - returns C{L{NotAny}} """ return NotAny( self ) def __call__(self, name=None): """ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be passed as C{True}. If C{name} is omitted, same as calling C{L{copy}}. Example:: # these are equivalent userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") """ if name is not None: return self.setResultsName(name) else: return self.copy() def suppress( self ): """ Suppresses the output of this C{ParserElement}; useful to keep punctuation from cluttering up returned output. """ return Suppress( self ) def leaveWhitespace( self ): """ Disables the skipping of whitespace before matching the characters in the C{ParserElement}'s defined pattern. This is normally only used internally by the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False return self def setWhitespaceChars( self, chars ): """ Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars self.copyDefaultWhiteChars = False return self def parseWithTabs( self ): """ Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. Must be called before C{parseString} when the input grammar contains elements that match C{<TAB>} characters. """ self.keepTabs = True return self def ignore( self, other ): """ Define expression to be ignored (e.g., comments) while doing pattern matching; may be called repeatedly, to define multiple comment or other ignorable patterns. Example:: patt = OneOrMore(Word(alphas)) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] patt.ignore(cStyleComment) patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] """ if isinstance(other, basestring): other = Suppress(other) if isinstance( other, Suppress ): if other not in self.ignoreExprs: self.ignoreExprs.append(other) else: self.ignoreExprs.append( Suppress( other.copy() ) ) return self def setDebugActions( self, startAction, successAction, exceptionAction ): """ Enable display of debugging messages while doing pattern matching. """ self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self def setDebug( self, flag=True ): """ Enable display of debugging messages while doing pattern matching. 
Set C{flag} to True to enable, False to disable. Example:: wd = Word(alphas).setName("alphaword") integer = Word(nums).setName("numword") term = wd | integer # turn on debugging for wd wd.setDebug() OneOrMore(term).parseString("abc 123 xyz 890") prints:: Match alphaword at loc 0(1,1) Matched alphaword -> ['abc'] Match alphaword at loc 3(1,4) Exception raised:Expected alphaword (at char 4), (line:1, col:5) Match alphaword at loc 7(1,8) Matched alphaword -> ['xyz'] Match alphaword at loc 11(1,12) Exception raised:Expected alphaword (at char 12), (line:1, col:13) Match alphaword at loc 15(1,16) Exception raised:Expected alphaword (at char 15), (line:1, col:16) The output shown is that produced by the default debug actions - custom debug actions can be specified using L{setDebugActions}. Prior to attempting to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, which makes debugging and exception messages easier to understand - for instance, the default name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. """ if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) else: self.debug = False return self def __str__( self ): return self.name def __repr__( self ): return _ustr(self) def streamline( self ): self.streamlined = True self.strRepr = None return self def checkRecursion( self, parseElementList ): pass def validate( self, validateTrace=[] ): """ Check defined expressions for valid structure, check for infinite recursive definitions. """ self.checkRecursion( [] ) def parseFile( self, file_or_filename, parseAll=False ): """ Execute the parse expression on the given file or filename. If a filename is specified (instead of a file object), the entire file is opened, read, and closed before parsing. """ try: file_contents = file_or_filename.read() except AttributeError: with open(file_or_filename, "r") as f: file_contents = f.read() try: return self.parseString(file_contents, parseAll) except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc def __eq__(self,other): if isinstance(other, ParserElement): return self is other or vars(self) == vars(other) elif isinstance(other, basestring): return self.matches(other) else: return super(ParserElement,self)==other def __ne__(self,other): return not (self == other) def __hash__(self): return hash(id(self)) def __req__(self,other): return self == other def __rne__(self,other): return not (self == other) def matches(self, testString, parseAll=True): """ Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser. 
        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests

        Example::
            expr = Word(nums)
            assert expr.matches("100")
        """
        try:
            self.parseString(_ustr(testString), parseAll=parseAll)
            return True
        except ParseBaseException:
            return False

    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.

        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each
        test's output

        Example::
            number_expr = pyparsing_common.number.copy()

            result = number_expr.runTests('''
                # unsigned integer
                100
                # negative integer
                -100
                # float with scientific notation
                6.02e23
                # integer with scientific notation
                1e-12
                ''')
            print("Success" if result[0] else "Failed!")

            result = number_expr.runTests('''
                # stray character
                100Z
                # missing leading digit before '.'
                -.100
                # too many '.'
                3.14.159
                ''', failureTests=True)
            print("Success" if result[0] else "Failed!")
        prints::
            # unsigned integer
            100
            [100]

            # negative integer
            -100
            [-100]

            # float with scientific notation
            6.02e23
            [6.02e+23]

            # integer with scientific notation
            1e-12
            [1e-12]

            Success

            # stray character
            100Z
               ^
            FAIL: Expected end of text (at char 3), (line:1, col:4)

            # missing leading digit before '.'
            -.100
            ^
            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

            # too many '.'
            3.14.159
                ^
            FAIL: Expected end of text (at char 4), (line:1, col:5)

            Success

        Each test string must be on a single line. If you want to test a string that spans multiple
        lines, create a test like this::

            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")

        (Note that this is a raw string literal, you must include the leading 'r'.)
""" if isinstance(tests, basestring): tests = list(map(str.strip, tests.rstrip().splitlines())) if isinstance(comment, basestring): comment = Literal(comment) allResults = [] comments = [] success = True for t in tests: if comment is not None and comment.matches(t, False) or comments and not t: comments.append(t) continue if not t: continue out = ['\n'.join(comments), t] comments = [] try: t = t.replace(r'\n','\n') result = self.parseString(t, parseAll=parseAll) out.append(result.dump(full=fullDump)) success = success and not failureTests except ParseBaseException as pe: fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) else: out.append(' '*pe.loc + '^' + fatal) out.append("FAIL: " + str(pe)) success = success and failureTests result = pe except Exception as exc: out.append("FAIL-EXCEPTION: " + str(exc)) success = success and failureTests result = exc if printResults: if fullDump: out.append('') print('\n'.join(out)) allResults.append((t, result)) return success, allResults class Token(ParserElement): """ Abstract C{ParserElement} subclass, for defining atomic matching patterns. """ def __init__( self ): super(Token,self).__init__( savelist=False ) class Empty(Token): """ An empty token, will always match. """ def __init__( self ): super(Empty,self).__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False class NoMatch(Token): """ A token that will never match. """ def __init__( self ): super(NoMatch,self).__init__() self.name = "NoMatch" self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" def parseImpl( self, instring, loc, doActions=True ): raise ParseException(instring, loc, self.errmsg, self) class Literal(Token): """ Token to exactly match a specified string. Example:: Literal('blah').parseString('blah') # -> ['blah'] Literal('blah').parseString('blahfooblah') # -> ['blah'] Literal('blah').parseString('bla') # -> Exception: Expected "blah" For case-insensitive matching, use L{CaselessLiteral}. For keyword matching (force word break before and after the matched string), use L{Keyword} or L{CaselessKeyword}. """ def __init__( self, matchString ): super(Literal,self).__init__() self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Literal; use Empty() instead", SyntaxWarning, stacklevel=2) self.__class__ = Empty self.name = '"%s"' % _ustr(self.match) self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False # Performance tuning: this routine gets called a *lot* # if this is a single character match string and the first character matches, # short-circuit as quickly as possible, and avoid calling startswith #~ @profile def parseImpl( self, instring, loc, doActions=True ): if (instring[loc] == self.firstMatchChar and (self.matchLen==1 or instring.startswith(self.match,loc)) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) _L = Literal ParserElement._literalStringClass = Literal class Keyword(Token): """ Token to exactly match a specified string as a keyword, that is, it must be immediately followed by a non-keyword character. Compare with C{L{Literal}}: - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. 
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, defaulting to all alphanumerics + "_" and "$" - C{caseless} allows case-insensitive matching, default is C{False}. Example:: Keyword("start").parseString("start") # -> ['start'] Keyword("start").parseString("starting") # -> Exception For case-insensitive matching, use L{CaselessKeyword}. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" def __init__( self, matchString, identChars=None, caseless=False ): super(Keyword,self).__init__() if identChars is None: identChars = Keyword.DEFAULT_KEYWORD_CHARS self.match = matchString self.matchLen = len(matchString) try: self.firstMatchChar = matchString[0] except IndexError: warnings.warn("null string passed to Keyword; use Empty() instead", SyntaxWarning, stacklevel=2) self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False self.mayIndexError = False self.caseless = caseless if caseless: self.caselessmatch = matchString.upper() identChars = identChars.upper() self.identChars = set(identChars) def parseImpl( self, instring, loc, doActions=True ): if self.caseless: if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and (loc == 0 or instring[loc-1].upper() not in self.identChars) ): return loc+self.matchLen, self.match else: if (instring[loc] == self.firstMatchChar and (self.matchLen==1 or instring.startswith(self.match,loc)) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and (loc == 0 or instring[loc-1] not in self.identChars) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) def copy(self): c = super(Keyword,self).copy() c.identChars = Keyword.DEFAULT_KEYWORD_CHARS return c @staticmethod def setDefaultKeywordChars( chars ): """Overrides the default Keyword chars """ Keyword.DEFAULT_KEYWORD_CHARS = chars class CaselessLiteral(Literal): """ Token to match a specified string, ignoring case of letters. Note: the matched results will always be in the case of the given match string, NOT the case of the input text. Example:: OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] (Contrast with example for L{CaselessKeyword}.) """ def __init__( self, matchString ): super(CaselessLiteral,self).__init__( matchString.upper() ) # Preserve the defining literal. self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name def parseImpl( self, instring, loc, doActions=True ): if instring[ loc:loc+self.matchLen ].upper() == self.match: return loc+self.matchLen, self.returnString raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): """ Caseless version of L{Keyword}. Example:: OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] (Contrast with example for L{CaselessLiteral}.) 
""" def __init__( self, matchString, identChars=None ): super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) def parseImpl( self, instring, loc, doActions=True ): if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) class CloseMatch(Token): """ A variation on L{Literal} which matches "close" matches, that is, strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: - C{match_string} - string to be matched - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match The results from a successful parse will contain the matched text from the input string and the following named results: - C{mismatches} - a list of the positions within the match_string where mismatches were found - C{original} - the original match_string used to compare against the input string If C{mismatches} is an empty list, then the match was an exact match. Example:: patt = CloseMatch("ATCATCGAATGGA") patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) # exact match patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) # close match allowing up to 2 mismatches patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) """ def __init__(self, match_string, maxMismatches=1): super(CloseMatch,self).__init__() self.name = match_string self.match_string = match_string self.maxMismatches = maxMismatches self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) self.mayIndexError = False self.mayReturnEmpty = False def parseImpl( self, instring, loc, doActions=True ): start = loc instrlen = len(instring) maxloc = start + len(self.match_string) if maxloc <= instrlen: match_string = self.match_string match_stringloc = 0 mismatches = [] maxMismatches = self.maxMismatches for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): src,mat = s_m if src != mat: mismatches.append(match_stringloc) if len(mismatches) > maxMismatches: break else: loc = match_stringloc + 1 results = ParseResults([instring[start:loc]]) results['original'] = self.match_string results['mismatches'] = mismatches return loc, results raise ParseException(instring, loc, self.errmsg, self) class Word(Token): """ Token for matching words composed of allowed character sets. Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. An optional C{excludeChars} parameter can list characters that might be found in the input C{bodyChars} string; useful to define a word of all printables except for one or two characters, for instance. 
L{srange} is useful for defining custom character set strings for defining C{Word} expressions, using range notation from regular expression character sets. A common mistake is to use C{Word} to match a specific literal string, as in C{Word("Address")}. Remember that C{Word} uses the string argument to define I{sets} of matchable characters. This expression would match "Add", "AAA", "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an exact literal string, use L{Literal} or L{Keyword}. pyparsing includes helper strings for building Words: - L{alphas} - L{nums} - L{alphanums} - L{hexnums} - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - L{printables} (any non-whitespace character) Example:: # a word composed of digits integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) # a word with a leading capital, and zero or more lowercase capital_word = Word(alphas.upper(), alphas.lower()) # hostnames are alphanumeric, with leading alpha, and '-' hostname = Word(alphas, alphanums+'-') # roman numeral (not a strict parser, accepts invalid mix of characters) roman = Word("IVXLCDM") # any string of non-whitespace characters, except for ',' csv_value = Word(printables, excludeChars=",") """ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): super(Word,self).__init__() if excludeChars: initChars = ''.join(c for c in initChars if c not in excludeChars) if bodyChars: bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) self.initCharsOrig = initChars self.initChars = set(initChars) if bodyChars : self.bodyCharsOrig = bodyChars self.bodyChars = set(bodyChars) else: self.bodyCharsOrig = initChars self.bodyChars = set(initChars) self.maxSpecified = max > 0 if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.asKeyword = asKeyword if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): if self.bodyCharsOrig == self.initCharsOrig: self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) elif len(self.initCharsOrig) == 1: self.reString = "%s[%s]*" % \ (re.escape(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig),) else: self.reString = "[%s][%s]*" % \ (_escapeRegexRangeChars(self.initCharsOrig), _escapeRegexRangeChars(self.bodyCharsOrig),) if self.asKeyword: self.reString = r"\b"+self.reString+r"\b" try: self.re = re.compile( self.reString ) except Exception: self.re = None def parseImpl( self, instring, loc, doActions=True ): if self.re: result = self.re.match(instring,loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() return loc, result.group() if not(instring[ loc ] in self.initChars): raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 instrlen = len(instring) bodychars = self.bodyChars maxloc = start + self.maxLen maxloc = min( maxloc, instrlen ) while loc < maxloc and instring[loc] in bodychars: loc += 1 throwException = False if loc - start < self.minLen: throwException = True if 
self.maxSpecified and loc < instrlen and instring[loc] in bodychars: throwException = True if self.asKeyword: if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): throwException = True if throwException: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] def __str__( self ): try: return super(Word,self).__str__() except Exception: pass if self.strRepr is None: def charsAsStr(s): if len(s)>4: return s[:4]+"..." else: return s if ( self.initCharsOrig != self.bodyCharsOrig ): self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) else: self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) return self.strRepr class Regex(Token): r""" Token for matching strings that match a given regular expression. Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as named parse results. Example:: realnum = Regex(r"[+-]?\d+\.\d*") date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) def __init__( self, pattern, flags=0): """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" super(Regex,self).__init__() if isinstance(pattern, basestring): if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % pattern, SyntaxWarning, stacklevel=2) raise elif isinstance(pattern, Regex.compiledREtype): self.re = pattern self.pattern = \ self.reString = str(pattern) self.flags = flags else: raise ValueError("Regex may only be constructed with a string or a compiled RE object") self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): result = self.re.match(instring,loc) if not result: raise ParseException(instring, loc, self.errmsg, self) loc = result.end() d = result.groupdict() ret = ParseResults(result.group()) if d: for k in d: ret[k] = d[k] return loc,ret def __str__( self ): try: return super(Regex,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "Re:(%s)" % repr(self.pattern) return self.strRepr class QuotedString(Token): r""" Token for matching strings that are delimited by quoting characters. 
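# --- Illustrative sketch (ours): as the Regex docstring notes, named groups
# in the pattern are preserved as named parse results.
def _demo_regex_named_groups():
    date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    result = date.parseString("2021-07-04")
    print(result['year'], result['month'], result['day'])   # -> 2021 07 04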
Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=C{None}) - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) Example:: qs = QuotedString('"') print(qs.searchString('lsjdf "This is the quote" sldjf')) complex_qs = QuotedString('{{', endQuoteChar='}}') print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) sql_qs = QuotedString('"', escQuote='""') print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) prints:: [['This is the quote']] [['This is the "quote"']] [['This is the quote with "embedded" quotes']] """ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): super(QuotedString,self).__init__() # remove white space from quote chars - wont work anyway quoteChar = quoteChar.strip() if not quoteChar: warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() if endQuoteChar is None: endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() if not endQuoteChar: warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar self.quoteCharLen = len(quoteChar) self.firstQuoteChar = quoteChar[0] self.endQuoteChar = endQuoteChar self.endQuoteCharLen = len(endQuoteChar) self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults self.convertWhitespaceEscapes = convertWhitespaceEscapes if multiline: self.flags = re.MULTILINE | re.DOTALL self.pattern = r'%s(?:[^%s%s]' % \ ( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) else: self.flags = 0 self.pattern = r'%s(?:[^%s\n\r%s]' % \ ( re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) if len(self.endQuoteChar) > 1: self.pattern += ( '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), _escapeRegexRangeChars(self.endQuoteChar[i])) for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' ) if escQuote: self.pattern += (r'|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += (r'|(?:%s.)' % re.escape(escChar)) self.escCharReplacePattern = re.escape(self.escChar)+"(.)" self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, SyntaxWarning, stacklevel=2) raise self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayIndexError = False self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None if not result: raise 
ParseException(instring, loc, self.errmsg, self) loc = result.end() ret = result.group() if self.unquoteResults: # strip off quotes ret = ret[self.quoteCharLen:-self.endQuoteCharLen] if isinstance(ret,basestring): # replace escaped whitespace if '\\' in ret and self.convertWhitespaceEscapes: ws_map = { r'\t' : '\t', r'\n' : '\n', r'\f' : '\f', r'\r' : '\r', } for wslit,wschar in ws_map.items(): ret = ret.replace(wslit, wschar) # replace escaped characters if self.escChar: ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) # replace escaped quotes if self.escQuote: ret = ret.replace(self.escQuote, self.endQuoteChar) return loc, ret def __str__( self ): try: return super(QuotedString,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) return self.strRepr class CharsNotIn(Token): """ Token for matching words composed of characters I{not} in a given set (will include whitespace in matched characters if not listed in the provided exclusion set - see example). Defined with string containing all disallowed characters, and an optional minimum, maximum, and/or exact length. The default value for C{min} is 1 (a minimum value < 1 is not valid); the default values for C{max} and C{exact} are 0, meaning no maximum or exact length restriction. Example:: # define a comma-separated-value as anything that is not a ',' csv_value = CharsNotIn(',') print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) prints:: ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ def __init__( self, notChars, min=1, max=0, exact=0 ): super(CharsNotIn,self).__init__() self.skipWhitespace = False self.notChars = notChars if min < 1: raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayReturnEmpty = ( self.minLen == 0 ) self.mayIndexError = False def parseImpl( self, instring, loc, doActions=True ): if instring[loc] in self.notChars: raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 notchars = self.notChars maxlen = min( start+self.maxLen, len(instring) ) while loc < maxlen and \ (instring[loc] not in notchars): loc += 1 if loc - start < self.minLen: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] def __str__( self ): try: return super(CharsNotIn, self).__str__() except Exception: pass if self.strRepr is None: if len(self.notChars) > 4: self.strRepr = "!W:(%s...)" % self.notChars[:4] else: self.strRepr = "!W:(%s)" % self.notChars return self.strRepr class White(Token): """ Special matching class for matching whitespace. Normally, whitespace is ignored by pyparsing grammars. This class is included when some whitespace structures are significant. Define with a string containing the whitespace characters to be matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, as defined for the C{L{Word}} class. 
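# --- Illustrative sketch (ours): White makes otherwise-skipped whitespace
# significant, e.g. to require a literal TAB between two fields.
def _demo_white():
    row = Word(alphas) + White("\t") + Word(alphas)
    print(row.parseString("key\tvalue"))    # -> ['key', '\t', 'value']
    try:
        row.parseString("key value")        # a space is not the required TAB
    except ParseException as pe:
        print("no tab:", pe)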
""" whiteStrs = { " " : "<SPC>", "\t": "<TAB>", "\n": "<LF>", "\r": "<CR>", "\f": "<FF>", } def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): super(White,self).__init__() self.matchWhite = ws self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) #~ self.leaveWhitespace() self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name self.minLen = min if max > 0: self.maxLen = max else: self.maxLen = _MAX_INT if exact > 0: self.maxLen = exact self.minLen = exact def parseImpl( self, instring, loc, doActions=True ): if not(instring[ loc ] in self.matchWhite): raise ParseException(instring, loc, self.errmsg, self) start = loc loc += 1 maxloc = start + self.maxLen maxloc = min( maxloc, len(instring) ) while loc < maxloc and instring[loc] in self.matchWhite: loc += 1 if loc - start < self.minLen: raise ParseException(instring, loc, self.errmsg, self) return loc, instring[start:loc] class _PositionToken(Token): def __init__( self ): super(_PositionToken,self).__init__() self.name=self.__class__.__name__ self.mayReturnEmpty = True self.mayIndexError = False class GoToColumn(_PositionToken): """ Token to advance to a specific column of input text; useful for tabular report scraping. """ def __init__( self, colno ): super(GoToColumn,self).__init__() self.col = colno def preParse( self, instring, loc ): if col(loc,instring) != self.col: instrlen = len(instring) if self.ignoreExprs: loc = self._skipIgnorables( instring, loc ) while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : loc += 1 return loc def parseImpl( self, instring, loc, doActions=True ): thiscol = col( loc, instring ) if thiscol > self.col: raise ParseException( instring, loc, "Text not in expected column", self ) newloc = loc + self.col - thiscol ret = instring[ loc: newloc ] return newloc, ret class LineStart(_PositionToken): """ Matches if current position is at the beginning of a line within the parse string Example:: test = '''\ AAA this line AAA and this line AAA but not this one B AAA and definitely not this one ''' for t in (LineStart() + 'AAA' + restOfLine).searchString(test): print(t) Prints:: ['AAA', ' this line'] ['AAA', ' and this line'] """ def __init__( self ): super(LineStart,self).__init__() self.errmsg = "Expected start of line" def parseImpl( self, instring, loc, doActions=True ): if col(loc, instring) == 1: return loc, [] raise ParseException(instring, loc, self.errmsg, self) class LineEnd(_PositionToken): """ Matches if current position is at the end of a line within the parse string """ def __init__( self ): super(LineEnd,self).__init__() self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) self.errmsg = "Expected end of line" def parseImpl( self, instring, loc, doActions=True ): if loc<len(instring): if instring[loc] == "\n": return loc+1, "\n" else: raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): return loc+1, [] else: raise ParseException(instring, loc, self.errmsg, self) class StringStart(_PositionToken): """ Matches if current position is at the beginning of the parse string """ def __init__( self ): super(StringStart,self).__init__() self.errmsg = "Expected start of text" def parseImpl( self, instring, loc, doActions=True ): if loc != 0: # see if entire string up to here is just whitespace and ignoreables if loc != self.preParse( instring, 0 ): raise ParseException(instring, loc, self.errmsg, self) return 
loc, [] class StringEnd(_PositionToken): """ Matches if current position is at the end of the parse string """ def __init__( self ): super(StringEnd,self).__init__() self.errmsg = "Expected end of text" def parseImpl( self, instring, loc, doActions=True ): if loc < len(instring): raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): return loc+1, [] elif loc > len(instring): return loc, [] else: raise ParseException(instring, loc, self.errmsg, self) class WordStart(_PositionToken): """ Matches if the current position is at the beginning of a Word, and is not preceded by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{\b} behavior of regular expressions, use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of the string being parsed, or at the beginning of a line. """ def __init__(self, wordChars = printables): super(WordStart,self).__init__() self.wordChars = set(wordChars) self.errmsg = "Not at the start of a word" def parseImpl(self, instring, loc, doActions=True ): if loc != 0: if (instring[loc-1] in self.wordChars or instring[loc] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class WordEnd(_PositionToken): """ Matches if the current position is at the end of a Word, and is not followed by any character in a given set of C{wordChars} (default=C{printables}). To emulate the C{\b} behavior of regular expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of the string being parsed, or at the end of a line. """ def __init__(self, wordChars = printables): super(WordEnd,self).__init__() self.wordChars = set(wordChars) self.skipWhitespace = False self.errmsg = "Not at the end of a word" def parseImpl(self, instring, loc, doActions=True ): instrlen = len(instring) if instrlen>0 and loc<instrlen: if (instring[loc] in self.wordChars or instring[loc-1] not in self.wordChars): raise ParseException(instring, loc, self.errmsg, self) return loc, [] class ParseExpression(ParserElement): """ Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
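# --- Illustrative sketch (ours): WordEnd rejects a match that runs into more
# word characters, similar to a regex \b assertion.
def _demo_word_boundaries():
    num = Word(nums) + WordEnd(alphanums)
    print(num.parseString("42"))      # -> ['42']
    try:
        num.parseString("42abc")      # 'a' continues the word, so WordEnd fails
    except ParseException as pe:
        print("not a whole word:", pe)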
""" def __init__( self, exprs, savelist = False ): super(ParseExpression,self).__init__(savelist) if isinstance( exprs, _generatorType ): exprs = list(exprs) if isinstance( exprs, basestring ): self.exprs = [ ParserElement._literalStringClass( exprs ) ] elif isinstance( exprs, Iterable ): exprs = list(exprs) # if sequence of strings provided, wrap with Literal if all(isinstance(expr, basestring) for expr in exprs): exprs = map(ParserElement._literalStringClass, exprs) self.exprs = list(exprs) else: try: self.exprs = list( exprs ) except TypeError: self.exprs = [ exprs ] self.callPreparse = False def __getitem__( self, i ): return self.exprs[i] def append( self, other ): self.exprs.append( other ) self.strRepr = None return self def leaveWhitespace( self ): """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on all contained expressions.""" self.skipWhitespace = False self.exprs = [ e.copy() for e in self.exprs ] for e in self.exprs: e.leaveWhitespace() return self def ignore( self, other ): if isinstance( other, Suppress ): if other not in self.ignoreExprs: super( ParseExpression, self).ignore( other ) for e in self.exprs: e.ignore( self.ignoreExprs[-1] ) else: super( ParseExpression, self).ignore( other ) for e in self.exprs: e.ignore( self.ignoreExprs[-1] ) return self def __str__( self ): try: return super(ParseExpression,self).__str__() except Exception: pass if self.strRepr is None: self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) return self.strRepr def streamline( self ): super(ParseExpression,self).streamline() for e in self.exprs: e.streamline() # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) # but only if there are no parse actions or resultsNames on the nested And's # (likewise for Or's and MatchFirst's) if ( len(self.exprs) == 2 ): other = self.exprs[0] if ( isinstance( other, self.__class__ ) and not(other.parseAction) and other.resultsName is None and not other.debug ): self.exprs = other.exprs[:] + [ self.exprs[1] ] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError other = self.exprs[-1] if ( isinstance( other, self.__class__ ) and not(other.parseAction) and other.resultsName is None and not other.debug ): self.exprs = self.exprs[:-1] + other.exprs[:] self.strRepr = None self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError self.errmsg = "Expected " + _ustr(self) return self def setResultsName( self, name, listAllMatches=False ): ret = super(ParseExpression,self).setResultsName(name,listAllMatches) return ret def validate( self, validateTrace=[] ): tmp = validateTrace[:]+[self] for e in self.exprs: e.validate(tmp) self.checkRecursion( [] ) def copy(self): ret = super(ParseExpression,self).copy() ret.exprs = [e.copy() for e in self.exprs] return ret class And(ParseExpression): """ Requires all given C{ParseExpression}s to be found in the given order. Expressions may be separated by whitespace. May be constructed using the C{'+'} operator. May also be constructed using the C{'-'} operator, which will suppress backtracking. 
Example:: integer = Word(nums) name_expr = OneOrMore(Word(alphas)) expr = And([integer("id"),name_expr("name"),integer("age")]) # more easily written as: expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): def __init__(self, *args, **kwargs): super(And._ErrorStop,self).__init__(*args, **kwargs) self.name = '-' self.leaveWhitespace() def __init__( self, exprs, savelist = True ): super(And,self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.setWhitespaceChars( self.exprs[0].whiteChars ) self.skipWhitespace = self.exprs[0].skipWhitespace self.callPreparse = True def parseImpl( self, instring, loc, doActions=True ): # pass False as last arg to _parse for first element, since we already # pre-parsed the string as part of our And pre-parsing loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) errorStop = False for e in self.exprs[1:]: if isinstance(e, And._ErrorStop): errorStop = True continue if errorStop: try: loc, exprtokens = e._parse( instring, loc, doActions ) except ParseSyntaxException: raise except ParseBaseException as pe: pe.__traceback__ = None raise ParseSyntaxException._from_exception(pe) except IndexError: raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: loc, exprtokens = e._parse( instring, loc, doActions ) if exprtokens or exprtokens.haskeys(): resultlist += exprtokens return loc, resultlist def __iadd__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #And( [ self, other ] ) def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) if not e.mayReturnEmpty: break def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr class Or(ParseExpression): """ Requires that at least one C{ParseExpression} is found. If two expressions match, the expression that matches the longest string will be used. May be constructed using the C{'^'} operator. Example:: # construct Or using '^' operator number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) print(number.searchString("123 3.1416 789")) prints:: [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(Or,self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None matches = [] for e in self.exprs: try: loc2 = e.tryParse( instring, loc ) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring,len(instring),e.errmsg,self) maxExcLoc = len(instring) else: # save match among all matches, to retry longest to shortest matches.append((loc2, e)) if matches: matches.sort(key=lambda x: -x[0]) for _,e in matches: try: return e._parse( instring, loc, doActions ) except ParseException as err: err.__traceback__ = None if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc if maxException is not None: maxException.msg = self.errmsg raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) def __ixor__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #Or( [ self, other ] ) def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class MatchFirst(ParseExpression): """ Requires that at least one C{ParseExpression} is found. If two expressions match, the first one listed is the one that will match. May be constructed using the C{'|'} operator. Example:: # construct MatchFirst using '|' operator # watch the order of expressions to match number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] # put more selective expression first number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(MatchFirst,self).__init__(exprs, savelist) if self.exprs: self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) else: self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): maxExcLoc = -1 maxException = None for e in self.exprs: try: ret = e._parse( instring, loc, doActions ) return ret except ParseException as err: if err.loc > maxExcLoc: maxException = err maxExcLoc = err.loc except IndexError: if len(instring) > maxExcLoc: maxException = ParseException(instring,len(instring),e.errmsg,self) maxExcLoc = len(instring) # only got here if no expression matched, raise exception for match that made it the furthest else: if maxException is not None: maxException.msg = self.errmsg raise maxException else: raise ParseException(instring, loc, "no defined alternatives to match", self) def __ior__(self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) return self.append( other ) #MatchFirst( [ self, other ] ) def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class Each(ParseExpression): """ Requires all given C{ParseExpression}s to be found, but in any order. Expressions may be separated by whitespace. May be constructed using the C{'&'} operator. Example:: color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") integer = Word(nums) shape_attr = "shape:" + shape_type("shape") posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") color_attr = "color:" + color("color") size_attr = "size:" + integer("size") # use Each (using operator '&') to accept attributes in any order # (shape and posn are required, color and size are optional) shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) shape_spec.runTests(''' shape: SQUARE color: BLACK posn: 100, 120 shape: CIRCLE size: 50 color: BLUE posn: 50,80 color:GREEN size:20 shape:TRIANGLE posn:20,40 ''' ) prints:: shape: SQUARE color: BLACK posn: 100, 120 ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - color: BLACK - posn: ['100', ',', '120'] - x: 100 - y: 120 - shape: SQUARE shape: CIRCLE size: 50 color: BLUE posn: 50,80 ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - color: BLUE - posn: ['50', ',', '80'] - x: 50 - y: 80 - shape: CIRCLE - size: 50 color: GREEN size: 20 shape: TRIANGLE posn: 20,40 ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - color: GREEN - posn: ['20', ',', '40'] - x: 20 - y: 40 - shape: TRIANGLE - size: 20 """ def __init__( self, exprs, savelist = True ): super(Each,self).__init__(exprs, savelist) self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) self.skipWhitespace = True self.initExprGroups = True def parseImpl( self, instring, loc, doActions=True ): if self.initExprGroups: self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not 
isinstance(e,Optional)] self.optionals = opt1 + opt2 self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] self.required += self.multirequired self.initExprGroups = False tmpLoc = loc tmpReqd = self.required[:] tmpOpt = self.optionals[:] matchOrder = [] keepMatching = True while keepMatching: tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired failed = [] for e in tmpExprs: try: tmpLoc = e.tryParse( instring, tmpLoc ) except ParseException: failed.append(e) else: matchOrder.append(self.opt1map.get(id(e),e)) if e in tmpReqd: tmpReqd.remove(e) elif e in tmpOpt: tmpOpt.remove(e) if len(failed) == len(tmpExprs): keepMatching = False if tmpReqd: missing = ", ".join(_ustr(e) for e in tmpReqd) raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) # add any unmatched Optionals, in case they have default values defined matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] resultlist = [] for e in matchOrder: loc,results = e._parse(instring,loc,doActions) resultlist.append(results) finalResults = sum(resultlist, ParseResults([])) return loc, finalResults def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" return self.strRepr def checkRecursion( self, parseElementList ): subRecCheckList = parseElementList[:] + [ self ] for e in self.exprs: e.checkRecursion( subRecCheckList ) class ParseElementEnhance(ParserElement): """ Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. 
""" def __init__( self, expr, savelist=False ): super(ParseElementEnhance,self).__init__(savelist) if isinstance( expr, basestring ): if issubclass(ParserElement._literalStringClass, Token): expr = ParserElement._literalStringClass(expr) else: expr = ParserElement._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: self.mayIndexError = expr.mayIndexError self.mayReturnEmpty = expr.mayReturnEmpty self.setWhitespaceChars( expr.whiteChars ) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList self.callPreparse = expr.callPreparse self.ignoreExprs.extend(expr.ignoreExprs) def parseImpl( self, instring, loc, doActions=True ): if self.expr is not None: return self.expr._parse( instring, loc, doActions, callPreParse=False ) else: raise ParseException("",loc,self.errmsg,self) def leaveWhitespace( self ): self.skipWhitespace = False self.expr = self.expr.copy() if self.expr is not None: self.expr.leaveWhitespace() return self def ignore( self, other ): if isinstance( other, Suppress ): if other not in self.ignoreExprs: super( ParseElementEnhance, self).ignore( other ) if self.expr is not None: self.expr.ignore( self.ignoreExprs[-1] ) else: super( ParseElementEnhance, self).ignore( other ) if self.expr is not None: self.expr.ignore( self.ignoreExprs[-1] ) return self def streamline( self ): super(ParseElementEnhance,self).streamline() if self.expr is not None: self.expr.streamline() return self def checkRecursion( self, parseElementList ): if self in parseElementList: raise RecursiveGrammarException( parseElementList+[self] ) subRecCheckList = parseElementList[:] + [ self ] if self.expr is not None: self.expr.checkRecursion( subRecCheckList ) def validate( self, validateTrace=[] ): tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion( [] ) def __str__( self ): try: return super(ParseElementEnhance,self).__str__() except Exception: pass if self.strRepr is None and self.expr is not None: self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) return self.strRepr class FollowedBy(ParseElementEnhance): """ Lookahead matching of the given parse expression. C{FollowedBy} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current position. C{FollowedBy} always returns a null token list. Example:: # use FollowedBy to match a label only if it is followed by a ':' data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() prints:: [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] """ def __init__( self, expr ): super(FollowedBy,self).__init__(expr) self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): self.expr.tryParse( instring, loc ) return loc, [] class NotAny(ParseElementEnhance): """ Lookahead to disallow matching with the given parse expression. C{NotAny} does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression does I{not} match at the current position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} always returns a null token list. May be constructed using the '~' operator. 
Example:: """ def __init__( self, expr ): super(NotAny,self).__init__(expr) #~ self.leaveWhitespace() self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True self.errmsg = "Found unwanted token, "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "~{" + _ustr(self.expr) + "}" return self.strRepr class _MultipleMatch(ParseElementEnhance): def __init__( self, expr, stopOn=None): super(_MultipleMatch, self).__init__(expr) self.saveAsList = True ender = stopOn if isinstance(ender, basestring): ender = ParserElement._literalStringClass(ender) self.not_ender = ~ender if ender is not None else None def parseImpl( self, instring, loc, doActions=True ): self_expr_parse = self.expr._parse self_skip_ignorables = self._skipIgnorables check_ender = self.not_ender is not None if check_ender: try_not_ender = self.not_ender.tryParse # must be at least one (but first see if we are the stopOn sentinel; # if so, fail) if check_ender: try_not_ender(instring, loc) loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) try: hasIgnoreExprs = (not not self.ignoreExprs) while 1: if check_ender: try_not_ender(instring, loc) if hasIgnoreExprs: preloc = self_skip_ignorables( instring, loc ) else: preloc = loc loc, tmptokens = self_expr_parse( instring, preloc, doActions ) if tmptokens or tmptokens.haskeys(): tokens += tmptokens except (ParseException,IndexError): pass return loc, tokens class OneOrMore(_MultipleMatch): """ Repetition of one or more of the given expression. Parameters: - expr - expression that must match one or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: BLACK" OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] # use stopOn attribute for OneOrMore to avoid reading label string as part of the data attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] # could also be written as (attr_expr * (1,)).parseString(text).pprint() """ def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "{" + _ustr(self.expr) + "}..." return self.strRepr class ZeroOrMore(_MultipleMatch): """ Optional repetition of zero or more of the given expression. 
Parameters: - expr - expression that must match zero or more times - stopOn - (default=C{None}) - expression for a terminating sentinel (only required if the sentinel would ordinarily match the repetition expression) Example: similar to L{OneOrMore} """ def __init__( self, expr, stopOn=None): super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): try: return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) except (ParseException,IndexError): return loc, [] def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "[" + _ustr(self.expr) + "]..." return self.strRepr class _NullToken(object): def __bool__(self): return False __nonzero__ = __bool__ def __str__(self): return "" _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): """ Optional matching of the given expression. Parameters: - expr - expression that must match zero or more times - default (optional) - value to be returned if the optional expression is not found. Example:: # US postal code can be a 5-digit zip, plus optional 4-digit qualifier zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) zip.runTests(''' # traditional ZIP code 12345 # ZIP+4 form 12101-0001 # invalid ZIP 98765- ''') prints:: # traditional ZIP code 12345 ['12345'] # ZIP+4 form 12101-0001 ['12101-0001'] # invalid ZIP 98765- ^ FAIL: Expected end of text (at char 5), (line:1, col:6) """ def __init__( self, expr, default=_optionalNotMatched ): super(Optional,self).__init__( expr, savelist=False ) self.saveAsList = self.expr.saveAsList self.defaultValue = default self.mayReturnEmpty = True def parseImpl( self, instring, loc, doActions=True ): try: loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) except (ParseException,IndexError): if self.defaultValue is not _optionalNotMatched: if self.expr.resultsName: tokens = ParseResults([ self.defaultValue ]) tokens[self.expr.resultsName] = self.defaultValue else: tokens = [ self.defaultValue ] else: tokens = [] return loc, tokens def __str__( self ): if hasattr(self,"name"): return self.name if self.strRepr is None: self.strRepr = "[" + _ustr(self.expr) + "]" return self.strRepr class SkipTo(ParseElementEnhance): """ Token for skipping over all undefined text until the matched expression is found. Parameters: - expr - target expression marking the end of the data to be skipped - include - (default=C{False}) if True, the target expression is also parsed (the skipped text and target expression are returned as a 2-element list). 
- ignore - (default=C{None}) used to define grammars (typically quoted strings and comments) that might contain false matches to the target expression - failOn - (default=C{None}) define expressions that are not allowed to be included in the skipped test; if found before the target expression is found, the SkipTo is not a match Example:: report = ''' Outstanding Issues Report - 1 Jan 2000 # | Severity | Description | Days Open -----+----------+-------------------------------------------+----------- 101 | Critical | Intermittent system crash | 6 94 | Cosmetic | Spelling error on Login ('log|n') | 14 79 | Minor | System slow when running too many reports | 47 ''' integer = Word(nums) SEP = Suppress('|') # use SkipTo to simply match everything up until the next SEP # - ignore quoted strings, so that a '|' character inside a quoted string does not match # - parse action will call token.strip() for each matched token, i.e., the description body string_data = SkipTo(SEP, ignore=quotedString) string_data.setParseAction(tokenMap(str.strip)) ticket_expr = (integer("issue_num") + SEP + string_data("sev") + SEP + string_data("desc") + SEP + integer("days_open")) for tkt in ticket_expr.searchString(report): print tkt.dump() prints:: ['101', 'Critical', 'Intermittent system crash', '6'] - days_open: 6 - desc: Intermittent system crash - issue_num: 101 - sev: Critical ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - days_open: 14 - desc: Spelling error on Login ('log|n') - issue_num: 94 - sev: Cosmetic ['79', 'Minor', 'System slow when running too many reports', '47'] - days_open: 47 - desc: System slow when running too many reports - issue_num: 79 - sev: Minor """ def __init__( self, other, include=False, ignore=None, failOn=None ): super( SkipTo, self ).__init__( other ) self.ignoreExpr = ignore self.mayReturnEmpty = True self.mayIndexError = False self.includeMatch = include self.asList = False if isinstance(failOn, basestring): self.failOn = ParserElement._literalStringClass(failOn) else: self.failOn = failOn self.errmsg = "No match found for "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): startloc = loc instrlen = len(instring) expr = self.expr expr_parse = self.expr._parse self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None tmploc = loc while tmploc <= instrlen: if self_failOn_canParseNext is not None: # break if failOn expression matches if self_failOn_canParseNext(instring, tmploc): break if self_ignoreExpr_tryParse is not None: # advance past ignore expressions while 1: try: tmploc = self_ignoreExpr_tryParse(instring, tmploc) except ParseBaseException: break try: expr_parse(instring, tmploc, doActions=False, callPreParse=False) except (ParseException, IndexError): # no match, advance loc in string tmploc += 1 else: # matched skipto expr, done break else: # ran off the end of the input string without matching skipto expr, fail raise ParseException(instring, loc, self.errmsg, self) # build up return values loc = tmploc skiptext = instring[startloc:loc] skipresult = ParseResults(skiptext) if self.includeMatch: loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) skipresult += mat return loc, skipresult class Forward(ParseElementEnhance): """ Forward declaration of an expression to be defined later - used for recursive grammars, such as algebraic infix notation. 
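# --- Illustrative sketch (ours): SkipTo with include=True also consumes the
# target expression and returns it after the skipped text.
def _demo_skipto_include():
    stmt = SkipTo(';', include=True)
    print(stmt.parseString("anything goes here;"))   # -> ['anything goes here', ';']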
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. Note: take care when assigning to C{Forward} not to overlook precedence of operators. Specifically, '|' has a lower precedence than '<<', so that:: fwdExpr << a | b | c will actually be evaluated as:: (fwdExpr << a) | b | c thereby leaving b and c out as parseable alternatives. It is recommended that you explicitly group the values inserted into the C{Forward}:: fwdExpr << (a | b | c) Converting to use the '<<=' operator instead will avoid this problem. See L{ParseResults.pprint} for an example of a recursive parser created using C{Forward}. """ def __init__( self, other=None ): super(Forward,self).__init__( other, savelist=False ) def __lshift__( self, other ): if isinstance( other, basestring ): other = ParserElement._literalStringClass(other) self.expr = other self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty self.setWhitespaceChars( self.expr.whiteChars ) self.skipWhitespace = self.expr.skipWhitespace self.saveAsList = self.expr.saveAsList self.ignoreExprs.extend(self.expr.ignoreExprs) return self def __ilshift__(self, other): return self << other def leaveWhitespace( self ): self.skipWhitespace = False return self def streamline( self ): if not self.streamlined: self.streamlined = True if self.expr is not None: self.expr.streamline() return self def validate( self, validateTrace=[] ): if self not in validateTrace: tmp = validateTrace[:]+[self] if self.expr is not None: self.expr.validate(tmp) self.checkRecursion([]) def __str__( self ): if hasattr(self,"name"): return self.name return self.__class__.__name__ + ": ..." # stubbed out for now - creates awful memory and perf issues self._revertClass = self.__class__ self.__class__ = _ForwardNoRecurse try: if self.expr is not None: retString = _ustr(self.expr) else: retString = "None" finally: self.__class__ = self._revertClass return self.__class__.__name__ + ": " + retString def copy(self): if self.expr is not None: return super(Forward,self).copy() else: ret = Forward() ret <<= self return ret class _ForwardNoRecurse(Forward): def __str__( self ): return "..." class TokenConverter(ParseElementEnhance): """ Abstract subclass of C{ParseExpression}, for converting parsed results. """ def __init__( self, expr, savelist=False ): super(TokenConverter,self).__init__( expr )#, savelist ) self.saveAsList = False class Combine(TokenConverter): """ Converter to concatenate all matching tokens to a single string. By default, the matching patterns must also be contiguous in the input string; this can be disabled by specifying C{'adjacent=False'} in the constructor. Example:: real = Word(nums) + '.' + Word(nums) print(real.parseString('3.1416')) # -> ['3', '.', '1416'] # will also erroneously match the following print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] real = Combine(Word(nums) + '.' + Word(nums)) print(real.parseString('3.1416')) # -> ['3.1416'] # no match when there are internal spaces print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) 
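# --- Illustrative sketch (ours): the classic Forward use case - a grammar
# that refers to itself. Note the '<<=' operator recommended in the Forward
# docstring above, which avoids the '|' precedence pitfall.
def _demo_forward():
    expr = Forward()
    atom = Word(nums) | Group('(' + expr + ')')
    expr <<= atom + ZeroOrMore('+' + atom)
    print(expr.parseString("1+(2+3)"))   # -> ['1', '+', ['(', '2', '+', '3', ')']]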
""" def __init__( self, expr, joinString="", adjacent=True ): super(Combine,self).__init__( expr ) # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself if adjacent: self.leaveWhitespace() self.adjacent = adjacent self.skipWhitespace = True self.joinString = joinString self.callPreparse = True def ignore( self, other ): if self.adjacent: ParserElement.ignore(self, other) else: super( Combine, self).ignore( other ) return self def postParse( self, instring, loc, tokenlist ): retToks = tokenlist.copy() del retToks[:] retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) if self.resultsName and retToks.haskeys(): return [ retToks ] else: return retToks class Group(TokenConverter): """ Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. Example:: ident = Word(alphas) num = Word(nums) term = ident | num func = ident + Optional(delimitedList(term)) print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] func = ident + Group(Optional(delimitedList(term))) print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] """ def __init__( self, expr ): super(Group,self).__init__( expr ) self.saveAsList = True def postParse( self, instring, loc, tokenlist ): return [ tokenlist ] class Dict(TokenConverter): """ Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. Example:: data_word = Word(alphas) label = data_word + FollowedBy(':') attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) # print attributes as plain groups print(OneOrMore(attr_expr).parseString(text).dump()) # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names result = Dict(OneOrMore(Group(attr_expr))).parseString(text) print(result.dump()) # access named fields as dict entries, or output as dict print(result['shape']) print(result.asDict()) prints:: ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} See more examples at L{ParseResults} of accessing fields by results name. 
""" def __init__( self, expr ): super(Dict,self).__init__( expr ) self.saveAsList = True def postParse( self, instring, loc, tokenlist ): for i,tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] if isinstance(ikey,int): ikey = _ustr(tok[0]).strip() if len(tok)==1: tokenlist[ikey] = _ParseResultsWithOffset("",i) elif len(tok)==2 and not isinstance(tok[1],ParseResults): tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) else: dictvalue = tok.copy() #ParseResults(i) del dictvalue[0] if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) else: tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) if self.resultsName: return [ tokenlist ] else: return tokenlist class Suppress(TokenConverter): """ Converter for ignoring the results of a parsed expression. Example:: source = "a, b, c,d" wd = Word(alphas) wd_list1 = wd + ZeroOrMore(',' + wd) print(wd_list1.parseString(source)) # often, delimiters that are useful during parsing are just in the # way afterward - use Suppress to keep them out of the parsed output wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) print(wd_list2.parseString(source)) prints:: ['a', ',', 'b', ',', 'c', ',', 'd'] ['a', 'b', 'c', 'd'] (See also L{delimitedList}.) """ def postParse( self, instring, loc, tokenlist ): return [] def suppress( self ): return self class OnlyOnce(object): """ Wrapper for parse actions, to ensure they are only called once. """ def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False def __call__(self,s,l,t): if not self.called: results = self.callable(s,l,t) self.called = True return results raise ParseException(s,l,"") def reset(self): self.called = False def traceParseAction(f): """ Decorator for debugging parse actions. When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls'] """ f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ s,l,t = paArgs[-3:] if len(paArgs)>3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) raise sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) return ret try: z.__name__ = f.__name__ except AttributeError: pass return z # # global helpers # def delimitedList( expr, delim=",", combine=False ): """ Helper to define a delimited list of expressions - the delimiter defaults to ','. By default, the list elements and delimiters can have intervening whitespace, and comments, but this can be overridden by passing C{combine=True} in the constructor. 
If C{combine} is set to C{True}, the matching tokens are returned as a single token string, with the delimiters included; otherwise, the matching tokens are returned as a list of tokens, with the delimiters suppressed. Example:: delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." if combine: return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) else: return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) def countedArray( expr, intExpr=None ): """ Helper to define a counted list of expressions. This helper defines a pattern of the form:: integer expr expr expr... where the leading integer tells how many expr expressions follow. The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. Example:: countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] # in this parser, the leading integer value is given in binary, # '10' indicating that 2 values are in the array binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() def countFieldParseAction(s,l,t): n = t[0] arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) return [] if intExpr is None: intExpr = Word(nums).setParseAction(lambda t:int(t[0])) else: intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] for i in L: if isinstance(i,list): ret.extend(_flatten(i)) else: ret.append(i) return ret def matchPreviousLiteral(expr): """ Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. """ rep = Forward() def copyTokenToRepeater(s,l,t): if t: if len(t) == 1: rep << t[0] else: # flatten t tokens tflat = _flatten(t.asList()) rep << And(Literal(tt) for tt in tflat) else: rep << Empty() expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def matchPreviousExpr(expr): """ Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do I{not} use with packrat parsing enabled. 
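    Example (a minimal sketch of the behavior described above)::

        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
        print(matchExpr.parseString("1:1"))  # -> ['1', ':', '1']
        # matchExpr.parseString("1:2") would raise a ParseException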
""" rep = Forward() e2 = expr.copy() rep <<= e2 def copyTokenToRepeater(s,l,t): matchTokens = _flatten(t.asList()) def mustMatchTheseTokens(s,l,t): theseTokens = _flatten(t.asList()) if theseTokens != matchTokens: raise ParseException("",0,"") rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): #~ escape these chars: ^-] for c in r"\^-]": s = s.replace(c,_bslash+c) s = s.replace("\n",r"\n") s = s.replace("\t",r"\t") return _ustr(s) def oneOf( strs, caseless=False, useRegex=True ): """ Helper to quickly define a set of alternative Literals, and makes sure to do longest-first testing when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}} for best performance. Parameters: - strs - a string of space-delimited literals, or a collection of string literals - caseless - (default=C{False}) - treat all literals as caseless - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) Example:: comp_oper = oneOf("< = > <= >= !=") var = Word(alphas) number = Word(nums) term = var | number comparison_expr = term + comp_oper + term print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) prints:: [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ if caseless: isequal = ( lambda a,b: a.upper() == b.upper() ) masks = ( lambda a,b: b.upper().startswith(a.upper()) ) parseElementClass = CaselessLiteral else: isequal = ( lambda a,b: a == b ) masks = ( lambda a,b: b.startswith(a) ) parseElementClass = Literal symbols = [] if isinstance(strs,basestring): symbols = strs.split() elif isinstance(strs, Iterable): symbols = list(strs) else: warnings.warn("Invalid argument to oneOf, expected string or iterable", SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() i = 0 while i < len(symbols)-1: cur = symbols[i] for j,other in enumerate(symbols[i+1:]): if ( isequal(other, cur) ): del symbols[i+j+1] break elif ( masks(cur, other) ): del symbols[i+j+1] symbols.insert(i,other) cur = other break else: i += 1 if not caseless and useRegex: #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) try: if len(symbols)==len("".join(symbols)): return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) else: return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) except Exception: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) # last resort, just use MatchFirst return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) def dictOf( key, value ): """ Helper to easily and clearly define a dictionary by specifying the respective patterns for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens in the proper order. The key pattern can include delimiting markers or punctuation, as long as they are suppressed, thereby leaving the significant key text. The value pattern can include named results, so that the C{Dict} results can include named token fields. 
Example:: text = "shape: SQUARE posn: upper left color: light blue texture: burlap" attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) print(OneOrMore(attr_expr).parseString(text).dump()) attr_label = label attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) # similar to Dict, but simpler call format result = dictOf(attr_label, attr_value).parseString(text) print(result.dump()) print(result['shape']) print(result.shape) # object attribute access works too print(result.asDict()) prints:: [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: light blue - posn: upper left - shape: SQUARE - texture: burlap SQUARE SQUARE {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} """ return Dict( ZeroOrMore( Group ( key + value ) ) ) def originalTextFor(expr, asString=True): """ Helper to return the original, untokenized text for a given expression. Useful to restore the parsed fields of an HTML start tag into the raw tag text itself, or to revert separate tokens with intervening whitespace back to the original matching input text. By default, returns astring containing the original parsed text. If the optional C{asString} argument is passed as C{False}, then the return value is a C{L{ParseResults}} containing any results names that were originally matched, and a single token containing the original matched text from the input string. So if the expression passed to C{L{originalTextFor}} contains expressions with defined results names, you must set C{asString} to C{False} if you want to preserve those results name values. Example:: src = "this is test <b> bold <i>text</i> </b> normal text " for tag in ("b","i"): opener,closer = makeHTMLTags(tag) patt = originalTextFor(opener + SkipTo(closer) + closer) print(patt.searchString(src)[0]) prints:: ['<b> bold <i>text</i> </b>'] ['<i>text</i>'] """ locMarker = Empty().setParseAction(lambda s,loc,t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") if asString: extractText = lambda s,l,t: s[t._original_start:t._original_end] else: def extractText(s,l,t): t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) matchExpr.ignoreExprs = expr.ignoreExprs return matchExpr def ungroup(expr): """ Helper to undo pyparsing's default grouping of And expressions, even if all but one are non-empty. """ return TokenConverter(expr).setParseAction(lambda t:t[0]) def locatedExpr(expr): """ Helper to decorate a returned token with its starting and ending locations in the input string. 
This helper adds the following results names: - locn_start = location where matched expression begins - locn_end = location where matched expression ends - value = the actual parsed results Be careful if the input text contains C{<TAB>} characters, you may want to call C{L{ParserElement.parseWithTabs}} Example:: wd = Word(alphas) for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): print(match) prints:: [[0, 'ljsdf', 5]] [[8, 'lksdjjf', 15]] [[18, 'lkkjj', 23]] """ locator = Empty().setParseAction(lambda s,l,t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) # convenience constants for positional expressions empty = Empty().setName("empty") lineStart = LineStart().setName("lineStart") lineEnd = LineEnd().setName("lineEnd") stringStart = StringStart().setName("stringStart") stringEnd = StringEnd().setName("stringEnd") _escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) _escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) _escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" def srange(s): r""" Helper to easily define string ranges for use in Word construction. Borrows syntax from regexp '[]' string range definitions:: srange("[0-9]") -> "0123456789" srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" The input string must be enclosed in []'s, and the returned string is the expanded character set joined into a single string. The values enclosed in the []'s may be: - a single character - an escaped character with a leading backslash (such as C{\-} or C{\]}) - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) (C{\0x##} is also supported for backwards compatibility) - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) """ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) try: return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) except Exception: return "" def matchOnlyAtCol(n): """ Helper method for defining parse actions that require matching at a specific column in the input text. """ def verifyCol(strg,locn,toks): if col(locn,strg) != n: raise ParseException(strg,locn,"matched token not at column %d" % n) return verifyCol def replaceWith(replStr): """ Helper method for common parse actions that simply return a literal value. Especially useful when used with C{L{transformString<ParserElement.transformString>}()}. Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s,l,t: [replStr] def removeQuotes(s,l,t): """ Helper parse action for removing quotation marks from parsed quoted strings. 
Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    return t[0][1:-1]

def tokenMap(func, *args):
    """
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
    args are passed, they are forwarded to the given function as additional arguments after the token, as in
    C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    parsed data to an integer using base 16.

    Example (compare the last example to the one in L{ParserElement.transformString})::
        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')

        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    prints::
        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    """
    def pa(s,l,t):
        return [func(tokn, *args) for tokn in t]

    try:
        func_name = getattr(func, '__name__',
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    pa.__name__ = func_name

    return pa

upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case.
Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas,alphanums+"_-:") if (xml): tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") else: printablesLessRAbrack = "".join(c for c in printables if c not in ">") tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) openTag = Suppress("<") + tagStr("tag") + \ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ Optional( Suppress("=") + tagAttrValue ) ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine(_L("</") + tagStr + ">") openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) openTag.tag = resname closeTag.tag = resname return openTag, closeTag def makeHTMLTags(tagStr): """ Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. Example:: text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple a,a_end = makeHTMLTags("A") link_expr = a + SkipTo(a_end)("link_text") + a_end for link in link_expr.searchString(text): # attributes in the <A> tag (like "href" shown here) are also accessible as named results print(link.link_text, '->', link.href) prints:: pyparsing -> http://pyparsing.wikispaces.com """ return _makeTags( tagStr, False ) def makeXMLTags(tagStr): """ Helper to construct opening and closing tag expressions for XML, given a tag name. Matches tags only in the given upper/lower case. Example: similar to L{makeHTMLTags} """ return _makeTags( tagStr, True ) def withAttribute(*args,**attrDict): """ Helper to create a validating parse action to be used with start tags created with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as C{<TD>} or C{<DIV>}. Call C{withAttribute} with a series of attribute names and values. Specify the list of filter attributes names and values as: - keyword arguments, as in C{(align="right")}, or - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for C{class} (with or without a namespace), use C{L{withClass}}. To verify that the attribute exists, but without specifying a value, pass C{withAttribute.ANY_VALUE} as the value. 
Example:: html = ''' <div> Some text <div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3 1,1</div> <div>this has no type</div> </div> ''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k,v) for k,v in attrs] def pa(s,l,tokens): for attrName,attrValue in attrs: if attrName not in tokens: raise ParseException(s,l,"no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % (attrName, tokens[attrName], attrValue)) return pa withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): """ Simplified version of C{L{withAttribute}} when matching on a div class - made difficult because C{class} is a reserved word in Python. Example:: html = ''' <div> Some text <div class="grid">1 4 0 1 0</div> <div class="graph">1,3 2,3 1,1</div> <div>this &lt;div&gt; has no class</div> </div> ''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "%s:class" % namespace if namespace else "class" return withAttribute(**{classattr : classname}) opAssoc = _Constants() opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """ Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. The generated parser will also recognize the use of parentheses to override operator precedences (see example below). Note: if you define a deep operator list, you may see performance issues when using infixNotation. See L{ParserElement.enablePackrat} for a mechanism to potentially improve your parser performance. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted); if the parse action is passed a tuple or list of functions, this is equivalent to calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) Example:: # simple example of four-function arithmetic with ints and variable names integer = pyparsing_common.signed_integer varname = pyparsing_common.identifier arith_expr = infixNotation(integer | varname, [ ('-', 1, opAssoc.RIGHT), (oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT), ]) arith_expr.runTests(''' 5+3*6 (5+3)*6 -2--11 ''', fullDump=False) prints:: 5+3*6 [[5, '+', [3, '*', 6]]] (5+3)*6 [[[5, '+', 3], '*', 6]] -2--11 [[['-', 2], '-', ['-', 11]]] """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: if isinstance(pa, (tuple, list)): matchExpr.setParseAction(*pa) else: matchExpr.setParseAction(pa) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret operatorPrecedence = infixNotation """(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using 
single or double quotes") unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): """ Helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default). Parameters: - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - content - expression for items within the nested lists (default=C{None}) - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) If an expression is not provided for the content argument, the nested expression will capture all whitespace-delimited content between delimiters as a list of separate values. Use the C{ignoreExpr} argument to define expressions that may contain opening or closing characters that should not be treated as opening or closing characters for nesting, such as quotedString or a comment expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. The default is L{quotedString}, but if no expressions are to be ignored, then pass C{None} for this argument. Example:: data_type = oneOf("void int short long char float double") decl_data_type = Combine(data_type + Optional(Word('*'))) ident = Word(alphas+'_', alphanums+'_') number = pyparsing_common.number arg = Group(decl_data_type + ident) LPAR,RPAR = map(Suppress, "()") code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) c_function = (decl_data_type("type") + ident("name") + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + code_body("body")) c_function.ignore(cStyleComment) source_code = ''' int is_odd(int x) { return (x%2); } int dec_to_hex(char hchar) { if (hchar >= '0' && hchar <= '9') { return (ord(hchar)-ord('0')); } else { return (10+ord(hchar)-ord('A')); } } ''' for func in c_function.searchString(source_code): print("%(name)s (%(type)s) args: %(args)s" % func) prints:: is_odd (int) args: [['int', 'x']] dec_to_hex (int) args: [['char', 'hchar']] """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") if content is None: if isinstance(opener,basestring) and isinstance(closer,basestring): if len(opener) == 1 and len(closer)==1: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS ).setParseAction(lambda t:t[0].strip())) else: if ignoreExpr is not None: content = (Combine(OneOrMore(~ignoreExpr + ~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) ).setParseAction(lambda t:t[0].strip())) else: raise ValueError("opening and closing arguments must be strings if no content expression is given") ret = Forward() if ignoreExpr is not None: ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) ret.setName('nested %s%s expression' % (opener,closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): """ Helper 
method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return curCol = col(l,s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: raise ParseFatalException(s,l,"illegal nesting") raise ParseException(s,l,"not a peer entry") def checkSubIndent(s,l,t): curCol = col(l,s) if curCol > indentStack[-1]: indentStack.append( curCol ) else: raise ParseException(s,l,"not a subentry") def checkUnindent(s,l,t): if l >= len(s): return curCol = col(l,s) if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): raise ParseException(s,l,"not an unindent") indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ FollowedBy(blockStatementExpr) + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) else: smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) _htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") def replaceHTMLEntity(t): """Helper parser action to replace common HTML entities with their special characters""" return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available 
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") "Comment of the form C{/* ... */}" htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") "Comment of the form C{<!-- ... -->}" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form C{// ... (to end of line)}" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") "Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" javaStyleComment = cppStyleComment "Same as C{L{cppStyleComment}}" pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form C{# ... (to end of line)}" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") """(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" # some other useful expressions - using lower-case class name since we are really using this as a namespace class pyparsing_common: """ Here are some common low-level expressions that may be useful in jump-starting parser development: - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) - common L{programming identifiers<identifier>} - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} - L{UUID<uuid>} - L{comma-separated list<comma_separated_list>} Parse actions: - C{L{convertToInteger}} - C{L{convertToFloat}} - C{L{convertToDate}} - C{L{convertToDatetime}} - C{L{stripHTMLTags}} - C{L{upcaseTokens}} - C{L{downcaseTokens}} Example:: pyparsing_common.number.runTests(''' # any int or real number, returned as the appropriate type 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.fnumber.runTests(''' # any int or real number, returned as float 100 -100 +100 3.14159 6.02e23 1e-12 ''') pyparsing_common.hex_integer.runTests(''' # hex numbers 100 FF ''') pyparsing_common.fraction.runTests(''' # fractions 1/2 -3/4 ''') pyparsing_common.mixed_integer.runTests(''' # mixed fractions 1 1/2 -3/4 1-3/4 ''') import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(''' # uuid 12345678-1234-5678-1234-567812345678 ''') prints:: # any int or real number, returned as the appropriate type 100 [100] -100 [-100] +100 [100] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # any int or real number, returned as float 100 [100.0] -100 [-100.0] +100 [100.0] 3.14159 [3.14159] 6.02e23 [6.02e+23] 1e-12 [1e-12] # hex numbers 100 [256] FF [255] # fractions 1/2 [0.5] -3/4 [-0.75] # mixed fractions 1 [1] 1/2 [0.5] -3/4 [-0.75] 1-3/4 [1.75] # uuid 12345678-1234-5678-1234-567812345678 [UUID('12345678-1234-5678-1234-567812345678')] """ convertToInteger = tokenMap(int) """ Parse action for converting parsed integers to Python int """ convertToFloat = tokenMap(float) """ Parse action for converting parsed numbers to Python float """ integer = Word(nums).setName("integer").setParseAction(convertToInteger) """expression that parses an unsigned integer, returns an int""" hex_integer = Word(hexnums).setName("hex 
integer").setParseAction(tokenMap(int,16)) """expression that parses a hexadecimal integer, returns an int""" signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) """expression that parses an integer with optional leading sign, returns an int""" fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") """fractional expression of an integer divided by an integer, returns a float""" fraction.addParseAction(lambda t: t[0]/t[-1]) mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" mixed_integer.addParseAction(sum) real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) """expression that parses a floating point number and returns a float""" sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) """expression that parses a floating point number with optional scientific notation and returns a float""" # streamlining this expression makes the docs nicer-looking number = (sci_real | real | signed_integer).streamline() """any numeric expression, returns the corresponding Python type""" fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) """any int or real number, returned as float""" identifier = Word(alphas+'_', alphanums+'_').setName("identifier") """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") "IPv4 address (C{0.0.0.0 - 255.255.255.255})" _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") "IPv6 address (long, short, or mixed form)" mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" @staticmethod def convertToDate(fmt="%Y-%m-%d"): """ Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn @staticmethod def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): """ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") "ISO8601 date (C{yyyy-mm-dd})" iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() @staticmethod def stripHTMLTags(s, l, tokens): """ Parse action to remove HTML tags from web page HTML source Example:: # strip HTML links from normal text text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' td,td_end = makeHTMLTags("TD") table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' """ return pyparsing_common._html_stripper.transformString(tokens[0]) _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + Optional( White(" \t") ) ) ).streamline().setName("commaItem") comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) """Parse action to convert tokens to upper case.""" downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) """Parse action to convert tokens to lower case.""" if __name__ == "__main__": selectToken = CaselessLiteral("select") fromToken = CaselessLiteral("from") ident = Word(alphas, alphanums + "_$") columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) columnNameList = Group(delimitedList(columnName)).setName("columns") columnSpec = ('*' | columnNameList) tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) tableNameList = Group(delimitedList(tableName)).setName("tables") simpleSQL = 
selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") # demo runTests method, including embedded comments in test string simpleSQL.runTests(""" # '*' as column list and dotted table name select * from SYS.XYZZY # caseless match on "SELECT", and casts back to "select" SELECT * from XYZZY, ABC # list of column names, and mixed case SELECT keyword Select AA,BB,CC from Sys.dual # multiple tables Select A, B, C from Sys.dual, Table2 # invalid SELECT keyword - should fail Xelect A, B, C from Sys.dual # incomplete command - should fail Select # invalid column name - should fail Select ^^^ frox Sys.dual """) pyparsing_common.number.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) # any int or real number, returned as float pyparsing_common.fnumber.runTests(""" 100 -100 +100 3.14159 6.02e23 1e-12 """) pyparsing_common.hex_integer.runTests(""" 100 FF """) import uuid pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) pyparsing_common.uuid.runTests(""" 12345678-1234-5678-1234-567812345678 """)
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/tags.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import import distutils.util try: from importlib.machinery import EXTENSION_SUFFIXES except ImportError: # pragma: no cover import imp EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] del imp import platform import re import sys import sysconfig import warnings INTERPRETER_SHORT_NAMES = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy", } _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 class Tag(object): __slots__ = ["_interpreter", "_abi", "_platform"] def __init__(self, interpreter, abi, platform): self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() @property def interpreter(self): return self._interpreter @property def abi(self): return self._abi @property def platform(self): return self._platform def __eq__(self, other): return ( (self.platform == other.platform) and (self.abi == other.abi) and (self.interpreter == other.interpreter) ) def __hash__(self): return hash((self._interpreter, self._abi, self._platform)) def __str__(self): return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) def __repr__(self): return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) def parse_tag(tag): tags = set() interpreters, abis, platforms = tag.split("-") for interpreter in interpreters.split("."): for abi in abis.split("."): for platform_ in platforms.split("."): tags.add(Tag(interpreter, abi, platform_)) return frozenset(tags) def _normalize_string(string): return string.replace(".", "_").replace("-", "_") def _cpython_interpreter(py_version): # TODO: Is using py_version_nodot for interpreter version critical? return "cp{major}{minor}".format(major=py_version[0], minor=py_version[1]) def _cpython_abis(py_version): abis = [] version = "{}{}".format(*py_version[:2]) debug = pymalloc = ucs4 = "" with_debug = sysconfig.get_config_var("Py_DEBUG") has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled # extension modules is the best option. # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 has_ext = "_d.pyd" in EXTENSION_SUFFIXES if with_debug or (with_debug is None and (has_refcount or has_ext)): debug = "d" if py_version < (3, 8): with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC") if with_pymalloc or with_pymalloc is None: pymalloc = "m" if py_version < (3, 3): unicode_size = sysconfig.get_config_var("Py_UNICODE_SIZE") if unicode_size == 4 or ( unicode_size is None and sys.maxunicode == 0x10FFFF ): ucs4 = "u" elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. abis.append("cp{version}".format(version=version)) abis.insert( 0, "cp{version}{debug}{pymalloc}{ucs4}".format( version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 ), ) return abis def _cpython_tags(py_version, interpreter, abis, platforms): for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): yield tag for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): yield tag # PEP 384 was first implemented in Python 3.2. 
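    # The loop below backfills abi3 tags for earlier CPython minor versions,
    # down to 3.2: a stable-ABI wheel built against an older 3.x interpreter
    # is still loadable by the running one.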
for minor_version in range(py_version[1] - 1, 1, -1): for platform_ in platforms: interpreter = "cp{major}{minor}".format( major=py_version[0], minor=minor_version ) yield Tag(interpreter, "abi3", platform_) def _pypy_interpreter(): return "pp{py_major}{pypy_major}{pypy_minor}".format( py_major=sys.version_info[0], pypy_major=sys.pypy_version_info.major, pypy_minor=sys.pypy_version_info.minor, ) def _generic_abi(): abi = sysconfig.get_config_var("SOABI") if abi: return _normalize_string(abi) else: return "none" def _pypy_tags(py_version, interpreter, abi, platforms): for tag in (Tag(interpreter, abi, platform) for platform in platforms): yield tag for tag in (Tag(interpreter, "none", platform) for platform in platforms): yield tag def _generic_tags(interpreter, py_version, abi, platforms): for tag in (Tag(interpreter, abi, platform) for platform in platforms): yield tag if abi != "none": tags = (Tag(interpreter, "none", platform_) for platform_ in platforms) for tag in tags: yield tag def _py_interpreter_range(py_version): """ Yield Python versions in descending order. After the latest version, the major-only version will be yielded, and then all following versions up to 'end'. """ yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1]) yield "py{major}".format(major=py_version[0]) for minor in range(py_version[1] - 1, -1, -1): yield "py{major}{minor}".format(major=py_version[0], minor=minor) def _independent_tags(interpreter, py_version, platforms): """ Return the sequence of tags that are consistent across implementations. The tags consist of: - py*-none-<platform> - <interpreter>-none-any - py*-none-any """ for version in _py_interpreter_range(py_version): for platform_ in platforms: yield Tag(version, "none", platform_) yield Tag(interpreter, "none", "any") for version in _py_interpreter_range(py_version): yield Tag(version, "none", "any") def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): if not is_32bit: return arch if arch.startswith("ppc"): return "ppc" return "i386" def _mac_binary_formats(version, cpu_arch): formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): return [] formats.extend(["intel", "fat64", "fat32"]) elif cpu_arch == "i386": if version < (10, 4): return [] formats.extend(["intel", "fat32", "fat"]) elif cpu_arch == "ppc64": # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? if version > (10, 5) or version < (10, 4): return [] formats.append("fat64") elif cpu_arch == "ppc": if version > (10, 6): return [] formats.extend(["fat32", "fat"]) formats.append("universal") return formats def _mac_platforms(version=None, arch=None): version_str, _, cpu_arch = platform.mac_ver() if version is None: version = tuple(map(int, version_str.split(".")[:2])) if arch is None: arch = _mac_arch(cpu_arch) platforms = [] for minor_version in range(version[1], -1, -1): compat_version = version[0], minor_version binary_formats = _mac_binary_formats(compat_version, arch) for binary_format in binary_formats: platforms.append( "macosx_{major}_{minor}_{binary_format}".format( major=compat_version[0], minor=compat_version[1], binary_format=binary_format, ) ) return platforms # From PEP 513. def _is_manylinux_compatible(name, glibc_version): # Check for presence of _manylinux module. try: import _manylinux return bool(getattr(_manylinux, name + "_compatible")) except (ImportError, AttributeError): # Fall through to heuristic check below. 
pass return _have_compatible_glibc(*glibc_version) def _glibc_version_string(): # Returns glibc version string, or None if not using glibc. import ctypes # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return None # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return version_str # Separated out from have_compatible_glibc for easier unit testing. def _check_glibc_version(version_str, required_major, minimum_minor): # Parse string and check against requested version. # # We use a regexp instead of str.split because we want to discard any # random junk that might come after the minor version -- this might happen # in patched/forked versions of glibc (e.g. Linaro's version of glibc # uses version strings like "2.20-2014.11"). See gh-3588. m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) if not m: warnings.warn( "Expected glibc version with 2 components major.minor," " got: %s" % version_str, RuntimeWarning, ) return False return ( int(m.group("major")) == required_major and int(m.group("minor")) >= minimum_minor ) def _have_compatible_glibc(required_major, minimum_minor): version_str = _glibc_version_string() if version_str is None: return False return _check_glibc_version(version_str, required_major, minimum_minor) def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): linux = _normalize_string(distutils.util.get_platform()) if linux == "linux_x86_64" and is_32bit: linux = "linux_i686" manylinux_support = ( ("manylinux2014", (2, 17)), # CentOS 7 w/ glibc 2.17 (PEP 599) ("manylinux2010", (2, 12)), # CentOS 6 w/ glibc 2.12 (PEP 571) ("manylinux1", (2, 5)), # CentOS 5 w/ glibc 2.5 (PEP 513) ) manylinux_support_iter = iter(manylinux_support) for name, glibc_version in manylinux_support_iter: if _is_manylinux_compatible(name, glibc_version): platforms = [linux.replace("linux", name)] break else: platforms = [] # Support for a later manylinux implies support for an earlier version. platforms += [linux.replace("linux", name) for name, _ in manylinux_support_iter] platforms.append(linux) return platforms def _generic_platforms(): platform = _normalize_string(distutils.util.get_platform()) return [platform] def _interpreter_name(): name = platform.python_implementation().lower() return INTERPRETER_SHORT_NAMES.get(name) or name def _generic_interpreter(name, py_version): version = sysconfig.get_config_var("py_version_nodot") if not version: version = "".join(map(str, py_version[:2])) return "{name}{version}".format(name=name, version=version) def sys_tags(): """ Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. 
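
    A minimal usage sketch (the concrete tags vary by interpreter and
    platform): ``next(sys_tags())`` yields the most-preferred tag first,
    e.g. a CPython-specific triple such as ``cp37-cp37m-manylinux1_x86_64``.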
""" py_version = sys.version_info[:2] interpreter_name = _interpreter_name() if platform.system() == "Darwin": platforms = _mac_platforms() elif platform.system() == "Linux": platforms = _linux_platforms() else: platforms = _generic_platforms() if interpreter_name == "cp": interpreter = _cpython_interpreter(py_version) abis = _cpython_abis(py_version) for tag in _cpython_tags(py_version, interpreter, abis, platforms): yield tag elif interpreter_name == "pp": interpreter = _pypy_interpreter() abi = _generic_abi() for tag in _pypy_tags(py_version, interpreter, abi, platforms): yield tag else: interpreter = _generic_interpreter(interpreter_name, py_version) abi = _generic_abi() for tag in _generic_tags(interpreter, py_version, abi, platforms): yield tag for tag in _independent_tags(interpreter, py_version, platforms): yield tag
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import collections import itertools import re from ._structures import Infinity __all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"] ) def parse(version): """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is a valid PEP 440 version or a legacy version. """ try: return Version(version) except InvalidVersion: return LegacyVersion(version) class InvalidVersion(ValueError): """ An invalid version was found, users should refer to PEP 440. """ class _BaseVersion(object): def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, _BaseVersion): return NotImplemented return method(self._key, other._key) class LegacyVersion(_BaseVersion): def __init__(self, version): self._version = str(version) self._key = _legacy_cmpkey(self._version) def __str__(self): return self._version def __repr__(self): return "<LegacyVersion({0})>".format(repr(str(self))) @property def public(self): return self._version @property def base_version(self): return self._version @property def epoch(self): return -1 @property def release(self): return None @property def pre(self): return None @property def post(self): return None @property def dev(self): return None @property def local(self): return None @property def is_prerelease(self): return False @property def is_postrelease(self): return False @property def is_devrelease(self): return False _legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) _legacy_version_replacement_map = { "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", } def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final" def _legacy_cmpkey(version): # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. This will effectively put the LegacyVersion, # which uses the defacto standard originally implemented by setuptools, # as before all PEP 440 versions. epoch = -1 # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. 
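    # String comparison orders these parts correctly: _parse_version_parts
    # zero-pads numeric parts to width 8, and prefixes textual parts with
    # "*", which sorts before any digit.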
parts = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag if part < "*final": while parts and parts[-1] == "*final-": parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == "00000000": parts.pop() parts.append(part) parts = tuple(parts) return epoch, parts # Deliberately not anchored to the start and end of the string, to make it # easier for 3rd party code to reuse VERSION_PATTERN = r""" v? (?: (?:(?P<epoch>[0-9]+)!)? # epoch (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment (?P<pre> # pre-release [-_\.]? (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) [-_\.]? (?P<pre_n>[0-9]+)? )? (?P<post> # post release (?:-(?P<post_n1>[0-9]+)) | (?: [-_\.]? (?P<post_l>post|rev|r) [-_\.]? (?P<post_n2>[0-9]+)? ) )? (?P<dev> # dev release [-_\.]? (?P<dev_l>dev) [-_\.]? (?P<dev_n>[0-9]+)? )? ) (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version """ class Version(_BaseVersion): _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) def __init__(self, version): # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion("Invalid version: '{0}'".format(version)) # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2") ), dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self): return "<Version({0})>".format(repr(str(self))) def __str__(self): parts = [] # Epoch if self.epoch != 0: parts.append("{0}!".format(self.epoch)) # Release segment parts.append(".".join(str(x) for x in self.release)) # Pre-release if self.pre is not None: parts.append("".join(str(x) for x in self.pre)) # Post-release if self.post is not None: parts.append(".post{0}".format(self.post)) # Development release if self.dev is not None: parts.append(".dev{0}".format(self.dev)) # Local version segment if self.local is not None: parts.append("+{0}".format(self.local)) return "".join(parts) @property def epoch(self): return self._version.epoch @property def release(self): return self._version.release @property def pre(self): return self._version.pre @property def post(self): return self._version.post[1] if self._version.post else None @property def dev(self): return self._version.dev[1] if self._version.dev else None @property def local(self): if self._version.local: return ".".join(str(x) for x in self._version.local) else: return None @property def public(self): return str(self).split("+", 1)[0] @property def base_version(self): parts = [] # Epoch if self.epoch != 0: parts.append("{0}!".format(self.epoch)) # Release segment parts.append(".".join(str(x) for x in self.release)) return "".join(parts) @property def is_prerelease(self): return self.dev is not None or self.pre is not None @property def is_postrelease(self): return self.post is not None @property def is_devrelease(self): return self.dev is not None def _parse_letter_version(letter, number): if 
letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" elif letter in ["rev", "r"]: letter = "post" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 1.0-1) letter = "post" return letter, int(number) _local_version_separators = re.compile(r"[\._-]") def _parse_local_version(local): """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) def _cmpkey(epoch, release, pre, post, dev, local): # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. release = tuple( reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. # We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: pre = -Infinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: pre = Infinity # Versions without a post segment should sort before those with one. if post is None: post = -Infinity # Versions without a development segment should sort after those with one. if dev is None: dev = Infinity if local is None: # Versions without a local segment should sort before those with one. local = -Infinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. # - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local) return epoch, release, pre, post, dev, local
0
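A short sketch of the parsing and ordering behavior implemented above, written against the standalone packaging import path (the vendored copy behaves identically; the version strings are illustrative):

from packaging.version import LegacyVersion, Version, parse

# parse() falls back to LegacyVersion for anything that is not PEP 440.
assert isinstance(parse("1.0.post1"), Version)
assert isinstance(parse("not.a!version"), LegacyVersion)

# Dev releases sort first, then pre-releases, then the final release.
assert Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")

# Legacy versions are keyed with epoch -1, so they sort before every
# PEP 440 version.
assert parse("not.a!version") < Version("0.0.1")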
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

from .__about__ import (
    __author__,
    __copyright__,
    __email__,
    __license__,
    __summary__,
    __title__,
    __uri__,
    __version__,
)

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import re

from .version import InvalidVersion, Version


_canonicalize_regex = re.compile(r"[-_.]+")


def canonicalize_name(name):
    # This is taken from PEP 503.
    return _canonicalize_regex.sub("-", name).lower()


def canonicalize_version(version):
    """
    This is very similar to Version.__str__, but has one subtle difference
    in the way it handles the release segment.
    """

    try:
        version = Version(version)
    except InvalidVersion:
        # Legacy versions cannot be normalized
        return version

    parts = []

    # Epoch
    if version.epoch != 0:
        parts.append("{0}!".format(version.epoch))

    # Release segment
    # NB: This strips trailing '.0's to normalize
    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))

    # Pre-release
    if version.pre is not None:
        parts.append("".join(str(x) for x in version.pre))

    # Post-release
    if version.post is not None:
        parts.append(".post{0}".format(version.post))

    # Development release
    if version.dev is not None:
        parts.append(".dev{0}".format(version.dev))

    # Local version segment
    if version.local is not None:
        parts.append("+{0}".format(version.local))

    return "".join(parts)
0
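The two helpers above normalize project names and version strings for index lookups. A quick sketch with the standalone import path (example inputs are illustrative):

from packaging.utils import canonicalize_name, canonicalize_version

# PEP 503 name normalization: runs of "-", "_", "." collapse to "-",
# and the result is lowercased.
assert canonicalize_name("Django_Rest.Framework") == "django-rest-framework"

# Trailing zero release segments are stripped from valid versions.
assert canonicalize_version("1.4.0") == "1.4"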
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import string
import re

from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L  # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r"[^ ]+")("url")
URL = AT + URI

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# pkg_resources.extern.pyparsing isn't thread safe during initialization, so
# we do it eagerly, see issue #104
REQUIREMENT.parseString("x[]")


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name,
    specifier, URL, and extras. Raises InvalidRequirement on a badly-formed
    requirement string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                'Parse error at "{0!r}": {1}'.format(
                    requirement_string[e.loc : e.loc + 8], e.msg
                )
            )

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if parsed_url.scheme == "file":
                if urlparse.urlunparse(parsed_url) != req.url:
                    raise InvalidRequirement("Invalid URL given")
            elif not (parsed_url.scheme and parsed_url.netloc) or (
                not parsed_url.scheme and not parsed_url.netloc
            ):
                raise InvalidRequirement("Invalid URL: {0}".format(req.url))
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))
            if self.marker:
                parts.append(" ")

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
0
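A minimal sketch of the Requirement API above, again via the standalone packaging import path; the requirement string follows PEP 508 and the concrete values are illustrative:

from packaging.requirements import Requirement

req = Requirement('requests[security,socks]>=2.8.1,==2.8.*; python_version < "3"')

assert req.name == "requests"
assert req.extras == {"security", "socks"}
# SpecifierSet stringifies its members in sorted order.
assert str(req.specifier) == "==2.8.*,>=2.8.1"
assert str(req.marker) == 'python_version < "3"'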
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function


class Infinity(object):
    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity


Infinity = Infinity()


class NegativeInfinity(object):
    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        return True

    def __le__(self, other):
        return True

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not isinstance(other, self.__class__)

    def __gt__(self, other):
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity


NegativeInfinity = NegativeInfinity()
0
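These two singletons exist purely as sort-key sentinels for _cmpkey in version.py: they compare above or below any other value, which lets missing version segments be padded in sort keys. A small sketch against the standalone import path (this is a private module, so the import is for illustration only):

from packaging._structures import Infinity, NegativeInfinity

assert NegativeInfinity < 0 < Infinity
assert -Infinity is NegativeInfinity and -NegativeInfinity is Infinity
assert sorted([Infinity, 3, NegativeInfinity]) == [NegativeInfinity, 3, Infinity]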
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import operator
import os
import platform
import sys

from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L  # noqa

from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier


__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """


class Node(object):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))

    def serialize(self):
        raise NotImplementedError


class Variable(Node):
    def serialize(self):
        return str(self)


class Value(Node):
    def serialize(self):
        return '"{0}"'.format(self)


class Op(Node):
    def serialize(self):
        return str(self)


VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # PEP-345
    | L("extra")  # undocumented setuptools legacy
)

ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results):
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results


def _format_marker(marker, first=True):
    assert isinstance(marker, (list, tuple, string_types))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to
    # skip the rest of this function so that we don't get extraneous () on
    # the outside.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs, op, rhs):
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)


_undefined = object()


def _get_env(environment, name):
    value = environment.get(name, _undefined)

    if value is _undefined:
        raise UndefinedEnvironmentName(
            "{0!r} does not exist in evaluation environment.".format(name)
        )

    return value


def _evaluate_markers(markers, environment):
    groups = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)


def format_full_version(info):
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    if hasattr(sys, "implementation"):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        iver = "0"
        implementation_name = ""

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker(object):
    def __init__(self, marker):
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc : e.loc + 8]
            )
            raise InvalidMarker(err_str)

    def __str__(self):
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
0
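A usage sketch for the Marker class above, using the standalone import path; the marker expression and overrides are illustrative:

from packaging.markers import Marker, default_environment

marker = Marker('python_version >= "3.6" and os_name == "posix"')

# Evaluated against the running interpreter by default...
print(marker.evaluate())

# ...or against explicit overrides of the environment.
print(marker.evaluate({"python_version": "2.7", "os_name": "nt"}))

# default_environment() lists every variable a marker may reference.
print(sorted(default_environment()))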
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/__about__.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "19.2"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2019 %s" % __author__
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/_compat.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import sys


PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# flake8: noqa

if PY3:
    string_types = (str,)
else:
    string_types = (basestring,)


def with_metaclass(meta, *bases):
    """
    Create a base class with a metaclass.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

    return type.__new__(metaclass, "temporary_class", (), {})
0
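with_metaclass is what lets specifiers.py declare abstract base classes in syntax that works on both Python 2 and 3. A sketch of the same pattern (class and method names here are invented for illustration):

import abc

from packaging._compat import with_metaclass


class Base(with_metaclass(abc.ABCMeta, object)):
    @abc.abstractmethod
    def run(self):
        """Subclasses must implement run()."""


try:
    Base()  # abstract classes cannot be instantiated
except TypeError as exc:
    print(exc)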
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import abc
import functools
import itertools
import re

from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse


class InvalidSpecifier(ValueError):
    """
    An invalid specifier was found, users should refer to PEP 440.
    """


class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
    @abc.abstractmethod
    def __str__(self):
        """
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Returns a hash value for this Specifier like object.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        """

    @abc.abstractproperty
    def prereleases(self):
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @prereleases.setter
    def prereleases(self, value):
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item, prereleases=None):
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(self, iterable, prereleases=None):
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class _IndividualSpecifier(BaseSpecifier):

    _operators = {}

    def __init__(self, spec="", prereleases=None):
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))

        self._spec = (match.group("operator").strip(), match.group("version").strip())

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)

    def __str__(self):
        return "{0}{1}".format(*self._spec)

    def __hash__(self):
        return hash(self._spec)

    def __eq__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec == other._spec

    def __ne__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op):
        return getattr(self, "_compare_{0}".format(self._operators[op]))

    def _coerce_version(self, version):
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self):
        return self._spec[0]

    @property
    def version(self):
        return self._spec[1]

    @property
    def prereleases(self):
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``.
        item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not; if we do not support prereleases then we can short circuit
        # the logic if this version is a prerelease.
        if item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        return self._get_operator(self.operator)(item, self.version)

    def filter(self, iterable, prereleases=None):
        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if parsed_version.is_prerelease and not (
                    prereleases or self.prereleases
                ):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version


class LegacySpecifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def _coerce_version(self, version):
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective, spec):
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective, spec):
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective, spec):
        return prospective > self._coerce_version(spec)


def _require_version_compare(fn):
    @functools.wraps(fn)
    def wrapped(self, prospective, spec):
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)

    return wrapped


class Specifier(_IndividualSpecifier):

    _regex_str = r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals
                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator
                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
            |
            (?:
                # All other operators only allow a subset of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.
                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
            )
        )
        """

    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective, spec):
        # Compatible releases have an equivalent combination of >= and ==.
        # That is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
        prefix = ".".join(
            list(
                itertools.takewhile(
                    lambda x: (not x.startswith("post") and not x.startswith("dev")),
                    _version_split(spec),
                )
            )[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
            prospective, prefix
        )

    @_require_version_compare
    def _compare_equal(self, prospective, spec):
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore the local
            # segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an
            # implicit dot in between a release segment and a pre-release
            # segment.
            spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that
            # there is an implicit dot in between a release segment and a
            # pre-release segment.
            prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the
            # spec so that we can determine if the specifier is a prefix of
            # the prospective version or not.
            prospective = prospective[: len(spec)]

            # Pad out our two sides with zeros so that they both equal the
            # same length.
            spec, prospective = _pad_version(spec, prospective)
        else:
            # Convert our spec string into a Version
            spec = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec.local:
                prospective = Version(prospective.public)

        return prospective == spec

    @_require_version_compare
    def _compare_not_equal(self, prospective, spec):
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work
        # with it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1
        # should not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is
        # both less than the spec version *and* it's not a pre-release of the
        # same version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work
        # with it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept post-release
        # versions for the version mentioned in the specifier (e.g. >3.1
        # should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is
        # both greater than the spec version *and* it's not a post-release of
        # the same version in the spec.
        return True

    def _compare_arbitrary(self, prospective, spec):
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self):
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove it before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version):
    result = []
    for item in version.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result


def _pad_version(left, right):
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]) :])
    right_split.append(right[len(right_split[0]) :])

    # Insert our padding
    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))

    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))


class SpecifierSet(BaseSpecifier):
    def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item,
        # and strip each item to remove leading/trailing whitespace.
        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed = set()
        for specifier in specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)

    def __str__(self):
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self):
        return hash(self._specs)

    def __and__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self):
        return len(self._specs)

    def __iter__(self):
        return iter(self._specs)

    @property
    def prereleases(self):
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that
        # the given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(self, iterable, prereleases=None):
        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough
        # filter which will filter out any pre-releases, unless there are no
        # final releases, and which will filter out LegacyVersion in general.
        else:
            filtered = []
            found_prereleases = []

            for item in iterable:
                # Ensure that we have some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
0
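A short sketch of SpecifierSet containment and filtering as implemented above, against the standalone import path; the version strings are illustrative:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=2.2,!=2.2.1")

# ~=2.2 is equivalent to >=2.2,==2.*
assert "2.2.0" in spec
assert "2.2.1" not in spec

# filter() keeps only matching versions; pre-releases are dropped unless
# the set itself mentions one or prereleases=True is forced.
assert list(spec.filter(["2.1", "2.2", "2.2.1", "2.3", "3.0.dev1"])) == ["2.2", "2.3"]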
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pkg_resources/extern/__init__.py
import sys


class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                return mod
            except ImportError:
                pass
        else:
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)


names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
0
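Because the module installs a VendorImporter into sys.meta_path at import time, importing through pkg_resources.extern resolves to the vendored copy first and falls back to a naturally installed distribution. A sketch:

# Resolves to pkg_resources._vendor.packaging when the vendored copy is
# present, otherwise to a normally installed `packaging`.
from pkg_resources.extern import packaging

print(packaging.__version__)  # "19.2" for the copy vendored here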
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/_json.py
try:
    import simplejson as json
except ImportError:
    import json


class _CompactJSON(object):
    """Wrapper around json module that strips whitespace."""

    @staticmethod
    def loads(payload):
        return json.loads(payload)

    @staticmethod
    def dumps(obj, **kwargs):
        kwargs.setdefault("ensure_ascii", False)
        kwargs.setdefault("separators", (",", ":"))
        return json.dumps(obj, **kwargs)
0
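The compact separators matter because this serializer feeds token payloads, where every byte counts. A sketch (the payload values are illustrative):

import json

from itsdangerous._json import _CompactJSON

payload = {"user": "qxf2", "roles": ["admin", "editor"]}
compact = _CompactJSON.dumps(payload)

# The compact form drops the spaces the stdlib inserts after ',' and ':'.
assert len(compact) < len(json.dumps(payload))
assert _CompactJSON.loads(compact) == payload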
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/jws.py
import hashlib
import time
from datetime import datetime

from ._compat import number_types
from ._json import _CompactJSON
from ._json import json
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadData
from .exc import BadHeader
from .exc import BadPayload
from .exc import BadSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import HMACAlgorithm
from .signer import NoneAlgorithm


class JSONWebSignatureSerializer(Serializer):
    """This serializer implements JSON Web Signature (JWS) support. Only
    supports the JWS Compact Serialization.
    """

    jws_algorithms = {
        "HS256": HMACAlgorithm(hashlib.sha256),
        "HS384": HMACAlgorithm(hashlib.sha384),
        "HS512": HMACAlgorithm(hashlib.sha512),
        "none": NoneAlgorithm(),
    }

    #: The default algorithm to use for signature generation
    default_algorithm = "HS512"

    default_serializer = _CompactJSON

    def __init__(
        self,
        secret_key,
        salt=None,
        serializer=None,
        serializer_kwargs=None,
        signer=None,
        signer_kwargs=None,
        algorithm_name=None,
    ):
        Serializer.__init__(
            self,
            secret_key=secret_key,
            salt=salt,
            serializer=serializer,
            serializer_kwargs=serializer_kwargs,
            signer=signer,
            signer_kwargs=signer_kwargs,
        )

        if algorithm_name is None:
            algorithm_name = self.default_algorithm

        self.algorithm_name = algorithm_name
        self.algorithm = self.make_algorithm(algorithm_name)

    def load_payload(self, payload, serializer=None, return_header=False):
        payload = want_bytes(payload)

        if b"." not in payload:
            raise BadPayload('No "." found in value')

        base64d_header, base64d_payload = payload.split(b".", 1)

        try:
            json_header = base64_decode(base64d_header)
        except Exception as e:
            raise BadHeader(
                "Could not base64 decode the header because of an exception",
                original_error=e,
            )

        try:
            json_payload = base64_decode(base64d_payload)
        except Exception as e:
            raise BadPayload(
                "Could not base64 decode the payload because of an exception",
                original_error=e,
            )

        try:
            header = Serializer.load_payload(self, json_header, serializer=json)
        except BadData as e:
            raise BadHeader(
                "Could not unserialize header because it was malformed",
                original_error=e,
            )

        if not isinstance(header, dict):
            raise BadHeader("Header payload is not a JSON object", header=header)

        payload = Serializer.load_payload(self, json_payload, serializer=serializer)

        if return_header:
            return payload, header

        return payload

    def dump_payload(self, header, obj):
        base64d_header = base64_encode(
            self.serializer.dumps(header, **self.serializer_kwargs)
        )
        base64d_payload = base64_encode(
            self.serializer.dumps(obj, **self.serializer_kwargs)
        )
        return base64d_header + b"." + base64d_payload

    def make_algorithm(self, algorithm_name):
        try:
            return self.jws_algorithms[algorithm_name]
        except KeyError:
            raise NotImplementedError("Algorithm not supported")

    def make_signer(self, salt=None, algorithm=None):
        if salt is None:
            salt = self.salt

        key_derivation = "none" if salt is None else None

        if algorithm is None:
            algorithm = self.algorithm

        return self.signer(
            self.secret_key,
            salt=salt,
            sep=".",
            key_derivation=key_derivation,
            algorithm=algorithm,
        )

    def make_header(self, header_fields):
        header = header_fields.copy() if header_fields else {}
        header["alg"] = self.algorithm_name
        return header

    def dumps(self, obj, salt=None, header_fields=None):
        """Like :meth:`.Serializer.dumps` but creates a JSON Web
        Signature. It also allows for specifying additional fields to be
        included in the JWS header.
        """
        header = self.make_header(header_fields)
        signer = self.make_signer(salt, self.algorithm)
        return signer.sign(self.dump_payload(header, obj))

    def loads(self, s, salt=None, return_header=False):
        """Reverse of :meth:`dumps`. If requested via ``return_header``
        it will return a tuple of payload and header.
        """
        payload, header = self.load_payload(
            self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
            return_header=True,
        )

        if header.get("alg") != self.algorithm_name:
            raise BadHeader("Algorithm mismatch", header=header, payload=payload)

        if return_header:
            return payload, header

        return payload

    def loads_unsafe(self, s, salt=None, return_header=False):
        kwargs = {"return_header": return_header}
        return self._loads_unsafe_impl(s, salt, kwargs, kwargs)


class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
    """Works like the regular :class:`JSONWebSignatureSerializer` but
    also records the time of the signing and can be used to expire
    signatures.

    JWS currently does not specify this behavior but it mentions a
    possible extension like this in the spec. Expiry date is encoded
    into the header similar to what's specified in `draft-ietf-oauth
    -json-web-token <http://self-issued.info/docs/draft-ietf-oauth-json
    -web-token.html#expDef>`_.
    """

    DEFAULT_EXPIRES_IN = 3600

    def __init__(self, secret_key, expires_in=None, **kwargs):
        JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)

        if expires_in is None:
            expires_in = self.DEFAULT_EXPIRES_IN

        self.expires_in = expires_in

    def make_header(self, header_fields):
        header = JSONWebSignatureSerializer.make_header(self, header_fields)
        iat = self.now()
        exp = iat + self.expires_in
        header["iat"] = iat
        header["exp"] = exp
        return header

    def loads(self, s, salt=None, return_header=False):
        payload, header = JSONWebSignatureSerializer.loads(
            self, s, salt, return_header=True
        )

        if "exp" not in header:
            raise BadSignature("Missing expiry date", payload=payload)

        int_date_error = BadHeader("Expiry date is not an IntDate", payload=payload)

        try:
            header["exp"] = int(header["exp"])
        except ValueError:
            raise int_date_error

        if header["exp"] < 0:
            raise int_date_error

        if header["exp"] < self.now():
            raise SignatureExpired(
                "Signature expired",
                payload=payload,
                date_signed=self.get_issue_date(header),
            )

        if return_header:
            return payload, header

        return payload

    def get_issue_date(self, header):
        rv = header.get("iat")

        if isinstance(rv, number_types):
            return datetime.utcfromtimestamp(int(rv))

    def now(self):
        return int(time.time())
0
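A usage sketch for the two serializers above; both are exported at the itsdangerous top level, and the key and payload values are illustrative:

from itsdangerous import JSONWebSignatureSerializer, TimedJSONWebSignatureSerializer

s = JSONWebSignatureSerializer("secret-key")
token = s.dumps({"id": 42})
assert s.loads(token) == {"id": 42}

# The timed variant embeds iat/exp in the header; loads() raises
# SignatureExpired once `expires_in` seconds have passed.
timed = TimedJSONWebSignatureSerializer("secret-key", expires_in=600)
payload, header = timed.loads(timed.dumps({"id": 42}), return_header=True)
assert payload == {"id": 42} and "exp" in header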
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/timed.py
import time
from datetime import datetime

from ._compat import text_type
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import bytes_to_int
from .encoding import int_to_bytes
from .encoding import want_bytes
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import Signer


class TimestampSigner(Signer):
    """Works like the regular :class:`.Signer` but also records the time
    of the signing and can be used to expire signatures. The
    :meth:`unsign` method can raise :exc:`.SignatureExpired` if the
    unsigning failed because the signature is expired.
    """

    def get_timestamp(self):
        """Returns the current timestamp. The function must return an
        integer.
        """
        return int(time.time())

    def timestamp_to_datetime(self, ts):
        """Used to convert the timestamp from :meth:`get_timestamp` into
        a datetime object.
        """
        return datetime.utcfromtimestamp(ts)

    def sign(self, value):
        """Signs the given string and also attaches time information."""
        value = want_bytes(value)
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)

    def unsign(self, value, max_age=None, return_timestamp=False):
        """Works like the regular :meth:`.Signer.unsign` but can also
        validate the time. See the base docstring of the class for the
        general behavior. If ``return_timestamp`` is ``True`` the
        timestamp of the signature will be returned as a naive
        :class:`datetime.datetime` object in UTC.
        """
        try:
            result = Signer.unsign(self, value)
            sig_error = None
        except BadSignature as e:
            sig_error = e
            result = e.payload or b""

        sep = want_bytes(self.sep)

        # If there is no timestamp in the result there is something
        # seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in
        # which we shouldn't have come except someone uses a time-based
        # serializer on non-timestamp data, so catch that.
        if sep not in result:
            if sig_error:
                raise sig_error

            raise BadTimeSignature("timestamp missing", payload=result)

        value, timestamp = result.rsplit(sep, 1)

        try:
            timestamp = bytes_to_int(base64_decode(timestamp))
        except Exception:
            timestamp = None

        # Signature is *not* okay. Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            raise BadTimeSignature(
                text_type(sig_error), payload=value, date_signed=timestamp
            )

        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but we handle it anyway.
        if timestamp is None:
            raise BadTimeSignature("Malformed timestamp", payload=value)

        # Check timestamp is not older than max_age
        if max_age is not None:
            age = self.get_timestamp() - timestamp

            if age > max_age:
                raise SignatureExpired(
                    "Signature age %s > %s seconds" % (age, max_age),
                    payload=value,
                    date_signed=self.timestamp_to_datetime(timestamp),
                )

        if return_timestamp:
            return value, self.timestamp_to_datetime(timestamp)

        return value

    def validate(self, signed_value, max_age=None):
        """Only validates the given signed value. Returns ``True`` if
        the signature exists and is valid."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False


class TimedSerializer(Serializer):
    """Uses :class:`TimestampSigner` instead of the default
    :class:`.Signer`.
    """

    default_signer = TimestampSigner

    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
        signature validation fails. If a ``max_age`` is provided it will
        ensure the signature is not older than that time in seconds. In
        case the signature is outdated, :exc:`.SignatureExpired` is
        raised. All arguments are forwarded to the signer's
        :meth:`~TimestampSigner.unsign` method.
        """
        s = want_bytes(s)
        last_exception = None

        for signer in self.iter_unsigners(salt):
            try:
                base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
                payload = self.load_payload(base64d)

                if return_timestamp:
                    return payload, timestamp

                return payload
            # If we get a signature expired it means we could read the
            # signature but it's invalid. In that case we do not want to
            # try the next signer.
            except SignatureExpired:
                raise
            except BadSignature as err:
                last_exception = err

        raise last_exception

    def loads_unsafe(self, s, max_age=None, salt=None):
        load_kwargs = {"max_age": max_age}
        load_payload_kwargs = {}
        return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/encoding.py
import base64
import string
import struct

from ._compat import text_type
from .exc import BadData


def want_bytes(s, encoding="utf-8", errors="strict"):
    if isinstance(s, text_type):
        s = s.encode(encoding, errors)

    return s


def base64_encode(string):
    """Base64 encode a string of bytes or text. The resulting bytes are
    safe to use in URLs.
    """
    string = want_bytes(string)
    return base64.urlsafe_b64encode(string).rstrip(b"=")


def base64_decode(string):
    """Base64 decode a URL-safe string of bytes or text. The result is
    bytes.
    """
    string = want_bytes(string, encoding="ascii", errors="ignore")
    string += b"=" * (-len(string) % 4)

    try:
        return base64.urlsafe_b64decode(string)
    except (TypeError, ValueError):
        raise BadData("Invalid base64-encoded data")


# The alphabet used by base64.urlsafe_*
_base64_alphabet = (string.ascii_letters + string.digits + "-_=").encode("ascii")

_int64_struct = struct.Struct(">Q")
_int_to_bytes = _int64_struct.pack
_bytes_to_int = _int64_struct.unpack


def int_to_bytes(num):
    return _int_to_bytes(num).lstrip(b"\x00")


def bytes_to_int(bytestr):
    return _bytes_to_int(bytestr.rjust(8, b"\x00"))[0]
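# A quick sketch of the helpers above; the sample values are assumptions
# chosen for illustration. base64_encode/base64_decode round-trip, and
# int_to_bytes drops the leading zero bytes of the big-endian encoding.
if __name__ == "__main__":
    assert base64_decode(base64_encode(b"hello")) == b"hello"
    assert int_to_bytes(1579) == b"\x06\x2b"  # 1579 == 0x062b
    assert bytes_to_int(b"\x06\x2b") == 1579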
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/__init__.py
from ._json import json
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadData
from .exc import BadHeader
from .exc import BadPayload
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .jws import JSONWebSignatureSerializer
from .jws import TimedJSONWebSignatureSerializer
from .serializer import Serializer
from .signer import HMACAlgorithm
from .signer import NoneAlgorithm
from .signer import Signer
from .timed import TimedSerializer
from .timed import TimestampSigner
from .url_safe import URLSafeSerializer
from .url_safe import URLSafeTimedSerializer

__version__ = "1.1.0"
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/url_safe.py
import zlib

from ._json import _CompactJSON
from .encoding import base64_decode
from .encoding import base64_encode
from .exc import BadPayload
from .serializer import Serializer
from .timed import TimedSerializer


class URLSafeSerializerMixin(object):
    """Mixed in with a regular serializer it will attempt to zlib
    compress the string to make it shorter if necessary. It will also
    base64 encode the string so that it can safely be placed in a URL.
    """

    default_serializer = _CompactJSON

    def load_payload(self, payload, *args, **kwargs):
        decompress = False

        if payload.startswith(b"."):
            payload = payload[1:]
            decompress = True

        try:
            json = base64_decode(payload)
        except Exception as e:
            raise BadPayload(
                "Could not base64 decode the payload because of an exception",
                original_error=e,
            )

        if decompress:
            try:
                json = zlib.decompress(json)
            except Exception as e:
                raise BadPayload(
                    "Could not zlib decompress the payload before decoding the payload",
                    original_error=e,
                )

        return super(URLSafeSerializerMixin, self).load_payload(json, *args, **kwargs)

    def dump_payload(self, obj):
        json = super(URLSafeSerializerMixin, self).dump_payload(obj)
        is_compressed = False
        compressed = zlib.compress(json)

        if len(compressed) < (len(json) - 1):
            json = compressed
            is_compressed = True

        base64d = base64_encode(json)

        if is_compressed:
            base64d = b"." + base64d

        return base64d


class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """Works like :class:`.Serializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase character of the
    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """


class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """Works like :class:`.TimedSerializer` but dumps and loads into a
    URL safe string consisting of the upper and lowercase character of
    the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """
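# A minimal sketch of URLSafeTimedSerializer for something like an email
# confirmation link. The secret key, salt, and max_age are illustrative
# assumptions.
if __name__ == "__main__":
    s = URLSafeTimedSerializer("secret-key", salt="activate")
    token = s.dumps({"user_id": 42})  # URL-safe text: payload.timestamp.signature
    print(s.loads(token, max_age=3600))  # {'user_id': 42} while fresh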
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/exc.py
from ._compat import PY2
from ._compat import text_type


class BadData(Exception):
    """Raised if bad data of any sort was encountered. This is the base
    for all exceptions that itsdangerous defines.

    .. versionadded:: 0.15
    """

    message = None

    def __init__(self, message):
        super(BadData, self).__init__(self, message)
        self.message = message

    def __str__(self):
        return text_type(self.message)

    if PY2:
        __unicode__ = __str__

        def __str__(self):
            return self.__unicode__().encode("utf-8")


class BadSignature(BadData):
    """Raised if a signature does not match."""

    def __init__(self, message, payload=None):
        BadData.__init__(self, message)

        #: The payload that failed the signature test. In some
        #: situations you might still want to inspect this, even if
        #: you know it was tampered with.
        #:
        #: .. versionadded:: 0.14
        self.payload = payload


class BadTimeSignature(BadSignature):
    """Raised if a time-based signature is invalid. This is a subclass
    of :class:`BadSignature`.
    """

    def __init__(self, message, payload=None, date_signed=None):
        BadSignature.__init__(self, message, payload)

        #: If the signature expired this exposes the date of when the
        #: signature was created. This can be helpful in order to
        #: tell the user how long a link has been stale.
        #:
        #: .. versionadded:: 0.14
        self.date_signed = date_signed


class SignatureExpired(BadTimeSignature):
    """Raised if a signature timestamp is older than ``max_age``. This
    is a subclass of :exc:`BadTimeSignature`.
    """


class BadHeader(BadSignature):
    """Raised if a signed header is invalid in some form. This only
    happens for serializers that have a header that goes with the
    signature.

    .. versionadded:: 0.24
    """

    def __init__(self, message, payload=None, header=None, original_error=None):
        BadSignature.__init__(self, message, payload)

        #: If the header is actually available but just malformed it
        #: might be stored here.
        self.header = header

        #: If available, the error that indicates why the payload was
        #: not valid. This might be ``None``.
        self.original_error = original_error


class BadPayload(BadData):
    """Raised if a payload is invalid. This could happen if the payload
    is loaded despite an invalid signature, or if there is a mismatch
    between the serializer and deserializer. The original exception
    that occurred during loading is stored as :attr:`original_error`.

    .. versionadded:: 0.15
    """

    def __init__(self, message, original_error=None):
        BadData.__init__(self, message)

        #: If available, the error that indicates why the payload was
        #: not valid. This might be ``None``.
        self.original_error = original_error
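# A small sketch of the hierarchy above: SignatureExpired can be caught
# as BadTimeSignature, BadSignature, or BadData, and the payload stays
# inspectable. The message and payload are illustrative assumptions.
if __name__ == "__main__":
    err = SignatureExpired("token too old", payload=b"{}")
    assert isinstance(err, (BadTimeSignature, BadSignature, BadData))
    print(str(err), err.payload, err.date_signed)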
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/serializer.py
import hashlib

from ._compat import text_type
from ._json import json
from .encoding import want_bytes
from .exc import BadPayload
from .exc import BadSignature
from .signer import Signer


def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary."""
    return isinstance(serializer.dumps({}), text_type)


class Serializer(object):
    """This class provides a serialization interface on top of the
    signer. It provides a similar API to json/pickle and other modules
    but is structured differently internally. If you want to change the
    underlying implementation for parsing and loading you have to
    override the :meth:`load_payload` and :meth:`dump_payload`
    functions.

    This implementation uses simplejson if available for dumping and
    loading and will fall back to the standard library's json module if
    it's not available.

    You do not need to subclass this class in order to switch out or
    customize the :class:`.Signer`. You can instead pass a different
    class to the constructor as well as keyword arguments as a dict
    that should be forwarded.

    .. code-block:: python

        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})

    You may want to upgrade the signing parameters without invalidating
    existing signatures that are in use. Fallback signatures can be
    given that will be tried if unsigning with the current signer
    fails.

    Fallback signers can be defined by providing a list of
    ``fallback_signers``. Each item can be one of the following: a
    signer class (which is instantiated with ``signer_kwargs``,
    ``salt``, and ``secret_key``), a tuple
    ``(signer_class, signer_kwargs)``, or a dict of ``signer_kwargs``.

    For example, this is a serializer that signs using SHA-512, but
    will unsign using either SHA-512 or SHA1:

    .. code-block:: python

        s = Serializer(
            signer_kwargs={"digest_method": hashlib.sha512},
            fallback_signers=[{"digest_method": hashlib.sha1}]
        )

    .. versionchanged:: 0.14:
        The ``signer`` and ``signer_kwargs`` parameters were added to
        the constructor.

    .. versionchanged:: 1.1.0:
        Added support for ``fallback_signers`` and configured a default
        SHA-512 fallback. This fallback is for users who used the
        yanked 1.0.0 release which defaulted to SHA-512.
    """

    #: If a serializer module or class is not passed to the constructor
    #: this one is picked up. This currently defaults to :mod:`json`.
    default_serializer = json

    #: The default :class:`Signer` class that is being used by this
    #: serializer.
    #:
    #: .. versionadded:: 0.14
    default_signer = Signer

    #: The default fallback signers.
    default_fallback_signers = [{"digest_method": hashlib.sha512}]

    def __init__(
        self,
        secret_key,
        salt=b"itsdangerous",
        serializer=None,
        serializer_kwargs=None,
        signer=None,
        signer_kwargs=None,
        fallback_signers=None,
    ):
        self.secret_key = want_bytes(secret_key)
        self.salt = want_bytes(salt)

        if serializer is None:
            serializer = self.default_serializer

        self.serializer = serializer
        self.is_text_serializer = is_text_serializer(serializer)

        if signer is None:
            signer = self.default_signer

        self.signer = signer
        self.signer_kwargs = signer_kwargs or {}

        if fallback_signers is None:
            fallback_signers = list(self.default_fallback_signers or ())

        self.fallback_signers = fallback_signers
        self.serializer_kwargs = serializer_kwargs or {}

    def load_payload(self, payload, serializer=None):
        """Loads the encoded object. This function raises
        :class:`.BadPayload` if the payload is not valid. The
        ``serializer`` parameter can be used to override the serializer
        stored on the class. The encoded ``payload`` should always be
        bytes.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)

        try:
            if is_text:
                payload = payload.decode("utf-8")

            return serializer.loads(payload)
        except Exception as e:
            raise BadPayload(
                "Could not load the payload because an exception"
                " occurred on unserializing the data.",
                original_error=e,
            )

    def dump_payload(self, obj):
        """Dumps the encoded object. The return value is always bytes.
        If the internal serializer returns text, the value will be
        encoded as UTF-8.
        """
        return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))

    def make_signer(self, salt=None):
        """Creates a new instance of the signer to be used. The default
        implementation uses the :class:`.Signer` base class.
        """
        if salt is None:
            salt = self.salt

        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)

    def iter_unsigners(self, salt=None):
        """Iterates over all signers to be tried for unsigning. Starts
        with the configured signer, then constructs each signer
        specified in ``fallback_signers``.
        """
        if salt is None:
            salt = self.salt

        yield self.make_signer(salt)

        for fallback in self.fallback_signers:
            if type(fallback) is dict:
                kwargs = fallback
                fallback = self.signer
            elif type(fallback) is tuple:
                fallback, kwargs = fallback
            else:
                kwargs = self.signer_kwargs

            yield fallback(self.secret_key, salt=salt, **kwargs)

    def dumps(self, obj, salt=None):
        """Returns a signed string serialized with the internal
        serializer. The return value can be either a byte or unicode
        string depending on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)

        if self.is_text_serializer:
            rv = rv.decode("utf-8")

        return rv

    def dump(self, obj, f, salt=None):
        """Like :meth:`dumps` but dumps into a file. The file handle
        has to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))

    def loads(self, s, salt=None):
        """Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        last_exception = None

        for signer in self.iter_unsigners(salt):
            try:
                return self.load_payload(signer.unsign(s))
            except BadSignature as err:
                last_exception = err

        raise last_exception

    def load(self, f, salt=None):
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)

    def loads_unsafe(self, s, salt=None):
        """Like :meth:`loads` but without verifying the signature. This
        is potentially very dangerous to use depending on how your
        serializer works. The return value is ``(signature_valid,
        payload)`` instead of just the payload. The first item will be
        a boolean that indicates if the signature is valid. This
        function never fails.

        Use it for debugging only and if you know that your serializer
        module is not exploitable (for example, do not use it with a
        pickle serializer).

        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)

    def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None):
        """Low level helper function to implement :meth:`loads_unsafe`
        in serializer subclasses.
        """
        try:
            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
        except BadSignature as e:
            if e.payload is None:
                return False, None

            try:
                return (
                    False,
                    self.load_payload(e.payload, **(load_payload_kwargs or {})),
                )
            except BadPayload:
                return False, None

    def load_unsafe(self, f, *args, **kwargs):
        """Like :meth:`loads_unsafe` but loads from a file.

        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), *args, **kwargs)
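# A minimal sketch of the fallback mechanism documented above: sign new
# tokens with SHA-512 while still accepting legacy SHA-1 tokens. The
# secret key is an illustrative assumption.
if __name__ == "__main__":
    old = Serializer("secret-key", signer_kwargs={"digest_method": hashlib.sha1})
    new = Serializer(
        "secret-key",
        signer_kwargs={"digest_method": hashlib.sha512},
        fallback_signers=[{"digest_method": hashlib.sha1}],
    )
    legacy_token = old.dumps([1, 2, 3])
    print(new.loads(legacy_token))  # [1, 2, 3], accepted via the SHA-1 fallback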
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/_compat.py
import decimal
import hmac
import numbers
import sys

PY2 = sys.version_info[0] == 2

if PY2:
    from itertools import izip

    text_type = unicode  # noqa: 821
else:
    izip = zip
    text_type = str

number_types = (numbers.Real, decimal.Decimal)


def _constant_time_compare(val1, val2):
    """Return ``True`` if the two strings are equal, ``False``
    otherwise.

    The time taken is independent of the number of characters that
    match. Do not use this function for anything else than comparison
    with known length targets.

    This should be implemented in C in order to get it completely
    right.

    This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
    """
    len_eq = len(val1) == len(val2)

    if len_eq:
        result = 0
        left = val1
    else:
        result = 1
        left = val2

    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y

    return result == 0


# Starting with 2.7/3.3 the standard library has a C implementation for
# constant time string compares.
constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare)
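# A quick sketch: the pure-Python fallback runs in time independent of
# where the inputs differ, so equality checks leak no timing signal.
# The byte strings are illustrative assumptions.
if __name__ == "__main__":
    assert constant_time_compare(b"token", b"token")
    assert not constant_time_compare(b"token", b"tokeX")  # differs at the end
    assert not constant_time_compare(b"token", b"tok")  # length mismatch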
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/itsdangerous/signer.py
import hashlib
import hmac

from ._compat import constant_time_compare
from .encoding import _base64_alphabet
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadSignature


class SigningAlgorithm(object):
    """Subclasses must implement :meth:`get_signature` to provide
    signature generation functionality.
    """

    def get_signature(self, key, value):
        """Returns the signature for the given key and value."""
        raise NotImplementedError()

    def verify_signature(self, key, value, sig):
        """Verifies the given signature matches the expected
        signature.
        """
        return constant_time_compare(sig, self.get_signature(key, value))


class NoneAlgorithm(SigningAlgorithm):
    """Provides an algorithm that does not perform any signing and
    returns an empty signature.
    """

    def get_signature(self, key, value):
        return b""


class HMACAlgorithm(SigningAlgorithm):
    """Provides signature generation using HMACs."""

    #: The digest method to use with the MAC algorithm. This defaults
    #: to SHA1, but can be changed to any other function in the hashlib
    #: module.
    default_digest_method = staticmethod(hashlib.sha1)

    def __init__(self, digest_method=None):
        if digest_method is None:
            digest_method = self.default_digest_method

        self.digest_method = digest_method

    def get_signature(self, key, value):
        mac = hmac.new(key, msg=value, digestmod=self.digest_method)
        return mac.digest()


class Signer(object):
    """This class can sign and unsign bytes, validating the signature
    provided.

    Salt can be used to namespace the hash, so that a signed string is
    only valid for a given namespace. Leaving this at the default value
    or re-using a salt value across different parts of your application
    where the same signed value in one part can mean something
    different in another part is a security risk.

    See :ref:`the-salt` for an example of what the salt is doing and
    how you can utilize it.

    .. versionadded:: 0.14
        ``key_derivation`` and ``digest_method`` were added as
        arguments to the class constructor.

    .. versionadded:: 0.18
        ``algorithm`` was added as an argument to the class
        constructor.
    """

    #: The digest method to use for the signer. This defaults to
    #: SHA1 but can be changed to any other function in the hashlib
    #: module.
    #:
    #: .. versionadded:: 0.14
    default_digest_method = staticmethod(hashlib.sha1)

    #: Controls how the key is derived. The default is Django-style
    #: concatenation. Possible values are ``concat``, ``django-concat``
    #: and ``hmac``. This is used for deriving a key from the secret
    #: key with an added salt.
    #:
    #: .. versionadded:: 0.14
    default_key_derivation = "django-concat"

    def __init__(
        self,
        secret_key,
        salt=None,
        sep=".",
        key_derivation=None,
        digest_method=None,
        algorithm=None,
    ):
        self.secret_key = want_bytes(secret_key)
        self.sep = want_bytes(sep)

        if self.sep in _base64_alphabet:
            raise ValueError(
                "The given separator cannot be used because it may be"
                " contained in the signature itself. Alphanumeric"
                " characters and `-_=` must not be used."
            )

        self.salt = "itsdangerous.Signer" if salt is None else salt

        if key_derivation is None:
            key_derivation = self.default_key_derivation

        self.key_derivation = key_derivation

        if digest_method is None:
            digest_method = self.default_digest_method

        self.digest_method = digest_method

        if algorithm is None:
            algorithm = HMACAlgorithm(self.digest_method)

        self.algorithm = algorithm

    def derive_key(self):
        """This method is called to derive the key. The default key
        derivation choices can be overridden here. Key derivation is
        not intended to be used as a security method to make a complex
        key out of a short password. Instead you should use large
        random secret keys.
        """
        salt = want_bytes(self.salt)

        if self.key_derivation == "concat":
            return self.digest_method(salt + self.secret_key).digest()
        elif self.key_derivation == "django-concat":
            return self.digest_method(salt + b"signer" + self.secret_key).digest()
        elif self.key_derivation == "hmac":
            mac = hmac.new(self.secret_key, digestmod=self.digest_method)
            mac.update(salt)
            return mac.digest()
        elif self.key_derivation == "none":
            return self.secret_key
        else:
            raise TypeError("Unknown key derivation method")

    def get_signature(self, value):
        """Returns the signature for the given value."""
        value = want_bytes(value)
        key = self.derive_key()
        sig = self.algorithm.get_signature(key, value)
        return base64_encode(sig)

    def sign(self, value):
        """Signs the given string."""
        return want_bytes(value) + want_bytes(self.sep) + self.get_signature(value)

    def verify_signature(self, value, sig):
        """Verifies the signature for the given value."""
        key = self.derive_key()

        try:
            sig = base64_decode(sig)
        except Exception:
            return False

        return self.algorithm.verify_signature(key, value, sig)

    def unsign(self, signed_value):
        """Unsigns the given string."""
        signed_value = want_bytes(signed_value)
        sep = want_bytes(self.sep)

        if sep not in signed_value:
            raise BadSignature("No %r found in value" % self.sep)

        value, sig = signed_value.rsplit(sep, 1)

        if self.verify_signature(value, sig):
            return value

        raise BadSignature("Signature %r does not match" % sig, payload=value)

    def validate(self, signed_value):
        """Only validates the given signed value. Returns ``True`` if
        the signature exists and is valid.
        """
        try:
            self.unsign(signed_value)
            return True
        except BadSignature:
            return False
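# A minimal sketch of the dot-separated token format produced above;
# "secret-key" is an illustrative assumption.
if __name__ == "__main__":
    signer = Signer("secret-key")
    token = signer.sign("my string")  # b'my string.<base64 HMAC-SHA1 signature>'
    assert signer.unsign(token) == b"my string"
    assert not signer.validate(b"my string.tampered")  # bad signature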
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/zip-safe
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/RECORD
pytz-2020.1.dist-info/DESCRIPTION.rst,sha256=ovmwqIt48fGpf9TczEWesZm0IiGilc_OZOjBt_tim30,19920 pytz-2020.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 pytz-2020.1.dist-info/LICENSE.txt,sha256=vosaN-vibFkqkPbA6zMQOn84POL010mMCvmlJpkKB7g,1088 pytz-2020.1.dist-info/METADATA,sha256=YCInVxuxMV-4Y7llMF0icBsY5VPgrqVL8Nm6ipywF-Q,21312 pytz-2020.1.dist-info/RECORD,, pytz-2020.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 pytz-2020.1.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 pytz-2020.1.dist-info/metadata.json,sha256=OTL9L7SZoBhV2rLcStW7nL0ydZiGQN9EIjF1IntrpR8,1505 pytz-2020.1.dist-info/top_level.txt,sha256=6xRYlt934v1yHb1JIrXgHyGxn3cqACvd-yE8ski_kcc,5 pytz-2020.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 pytz/__init__.py,sha256=9_YKAsGiW6bwPEBkJvwaj0qDF5Sma1_3NsIQntvySoQ,34794 pytz/__pycache__/__init__.cpython-39.pyc,, pytz/__pycache__/exceptions.cpython-39.pyc,, pytz/__pycache__/lazy.cpython-39.pyc,, pytz/__pycache__/reference.cpython-39.pyc,, pytz/__pycache__/tzfile.cpython-39.pyc,, pytz/__pycache__/tzinfo.cpython-39.pyc,, pytz/exceptions.py,sha256=434ZcuLlpLQY9mWoGq7zJMV1TyiYvVgpKBU1qZkbDjM,1571 pytz/lazy.py,sha256=toeR5uDWKBj6ezsUZ4elNP6CEMtK7CO2jS9A30nsFbo,5404 pytz/reference.py,sha256=zUtCki7JFEmrzrjNsfMD7YL0lWDxynKc1Ubo4iXSs74,3778 pytz/tzfile.py,sha256=g2CMhXZ1PX2slgg5_Kk9TvmIkVKeOjbuONHEfZP6jMk,4745 pytz/tzinfo.py,sha256=-5UjW-yqHbtO5NtSaWope7EbSdf2oTES26Kdlxjqdk0,19272 pytz/zoneinfo/Africa/Abidjan,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Accra,sha256=Wu58Zep-DvaYnW1ahhz2jCDsTtOixcWpUWpYfa1NPqc,816 pytz/zoneinfo/Africa/Addis_Ababa,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Algiers,sha256=k56yX_gxM4qFb7dez04soVIV-qD66Thd2PJxaS-ChHc,735 pytz/zoneinfo/Africa/Asmara,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Asmera,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Bamako,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Bangui,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Banjul,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Bissau,sha256=IjuxDP6EZiDHFvl_bHS6NN7sdRxLKXllooBC829poak,194 pytz/zoneinfo/Africa/Blantyre,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Brazzaville,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Bujumbura,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Cairo,sha256=L6zLQLnQtLkEELOGfm6USaHY33qAEPgGV822-iU1vxc,1955 pytz/zoneinfo/Africa/Casablanca,sha256=4RqVbw_F3ZucopIC2ivAJ8WDwj5wRODAB67tBpdXcgA,2429 pytz/zoneinfo/Africa/Ceuta,sha256=jp7xqONgZ3NPnElHzJEVusHKM9rxDK1nxJm4-i7Ln8o,2036 pytz/zoneinfo/Africa/Conakry,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Dakar,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Dar_es_Salaam,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Djibouti,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Douala,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/El_Aaiun,sha256=UWCCqQLJxd8qsTYw82kz9W1suwW5TRgnZw31sDWDz20,2295 pytz/zoneinfo/Africa/Freetown,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Gaborone,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 
pytz/zoneinfo/Africa/Harare,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Johannesburg,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 pytz/zoneinfo/Africa/Juba,sha256=TC4SaGEzvtDtdyU6lfxdqVQqDsNklvVokhqHZt_YteU,653 pytz/zoneinfo/Africa/Kampala,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Khartoum,sha256=MYWDoJ3AcCItZdApoeOgtWWDDxquwTon5v5TOGP70-o,679 pytz/zoneinfo/Africa/Kigali,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Kinshasa,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Lagos,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Libreville,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Lome,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Luanda,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Lubumbashi,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Lusaka,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Malabo,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Maputo,sha256=k_GelVHViGiuWCB1LSyTpIYSTDZEY9yclInQRY-LxoI,149 pytz/zoneinfo/Africa/Maseru,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 pytz/zoneinfo/Africa/Mbabane,sha256=bBvMdSZo53WFowiuhUO9C8zY6BOGViboCb-U8_49l34,246 pytz/zoneinfo/Africa/Mogadishu,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Monrovia,sha256=-VsJW5cU4KdvfgYaQVv4lcuzmaKIVFMd42nO6RXOBdU,208 pytz/zoneinfo/Africa/Nairobi,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Africa/Ndjamena,sha256=8T3A0Zm9Gj0Bvm6rd88t3GAXKiKdGUfHlIqYlkYI0KM,199 pytz/zoneinfo/Africa/Niamey,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Nouakchott,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Ouagadougou,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Porto-Novo,sha256=xqLNGqbjHZ1JuIHsEXP9ttXSb3v-GWp98SJ14pL6sUw,149 pytz/zoneinfo/Africa/Sao_Tome,sha256=MdjxpQ268uzJ7Zx1ZroFUtRUwqsJ6F_yY3AYV9FXw1I,254 pytz/zoneinfo/Africa/Timbuktu,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Africa/Tripoli,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 pytz/zoneinfo/Africa/Tunis,sha256=OFVMEM4eYT2Ez0beuhEUCTSIpcFldWxsV2uEoTZIUNI,689 pytz/zoneinfo/Africa/Windhoek,sha256=xuhvudrMH4alnVmouSTQI8YL8F_HbgsF2EQ7AZKzuHs,955 pytz/zoneinfo/America/Adak,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 pytz/zoneinfo/America/Anchorage,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 pytz/zoneinfo/America/Anguilla,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Antigua,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Araguaina,sha256=kppiiytmSQeesflyNGYM3r8NVUl1C-ggu08s9_Tt-co,884 pytz/zoneinfo/America/Argentina/Buenos_Aires,sha256=ntn_GFHadbrFJ4ZuhU6h2uzbFwmDyS9mXV5S28pkGF8,1076 pytz/zoneinfo/America/Argentina/Catamarca,sha256=diH1f96kbbY-7gJYQnSCNHs3n9dwHJqUhSdGNx1L7I0,1076 pytz/zoneinfo/America/Argentina/ComodRivadavia,sha256=diH1f96kbbY-7gJYQnSCNHs3n9dwHJqUhSdGNx1L7I0,1076 pytz/zoneinfo/America/Argentina/Cordoba,sha256=1XqIP8Qo2bPR7909hrAI-qAttybmwEW4ms7FjZA5Yfw,1076 pytz/zoneinfo/America/Argentina/Jujuy,sha256=5HR0TlZFifwJ5nLTmg7yWXgCTx9mRhahfs4_Wq70wOY,1048 pytz/zoneinfo/America/Argentina/La_Rioja,sha256=Zf_E3akFE1YUt9MZ4xxbRnOrp2bH1D-Bjsc0SLFfRyU,1090 
pytz/zoneinfo/America/Argentina/Mendoza,sha256=5DJiYYeQpcLBR_IoIJtk43IswJeGYawx5GykszuJ-Nw,1076 pytz/zoneinfo/America/Argentina/Rio_Gallegos,sha256=T97WADwva6JbxICviNQUt_7iw9c-nloI4QJCscENSck,1076 pytz/zoneinfo/America/Argentina/Salta,sha256=ATw0uR6szWKPs6jzdn6revS7UxCXD26ORK6jlmsjL18,1048 pytz/zoneinfo/America/Argentina/San_Juan,sha256=qlW693a0Tnofy-RdcVBuWY3DvTTGxWwcYdKU3Y98pX8,1090 pytz/zoneinfo/America/Argentina/San_Luis,sha256=WYdcro5-Fe-N6LkQsKwx_1tVozmnBp58DO1-BJs2suo,1102 pytz/zoneinfo/America/Argentina/Tucuman,sha256=wsjg1a5AM1dP2gjr112k3vt54trcOOM_StF74xzvBJc,1104 pytz/zoneinfo/America/Argentina/Ushuaia,sha256=9548Vvq_kpw_NX5s65vYuIbqvwGV-PBxqwmcrflLI0U,1076 pytz/zoneinfo/America/Aruba,sha256=ZGEIylAZ5iy_rIBsXREtH_ZfWRIkLI9dQjP_EIyn3sY,186 pytz/zoneinfo/America/Asuncion,sha256=FTLtFk6MjJoh5VIDgJ2Sf4B_iNeCDxrV0MWwQL-sOVM,2044 pytz/zoneinfo/America/Atikokan,sha256=4a94GtPHUdQ-2sdz9WinsKn9V_QiM4XmFj48FTPMeSA,336 pytz/zoneinfo/America/Atka,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 pytz/zoneinfo/America/Bahia,sha256=cmLkSAAzINlzYGXBqADEU3uPgA9S5nt-p1AV3Zy86VY,1024 pytz/zoneinfo/America/Bahia_Banderas,sha256=BNjbcHSlPsJ4UpJx-gs1hpIyx2ScBieh1nyDuGb0PcE,1546 pytz/zoneinfo/America/Barbados,sha256=Y0XwBAv5eSAZHNixVIhRvPxNWqP2a1FCH2_z2Vrd-sc,314 pytz/zoneinfo/America/Belem,sha256=_258hQZLCEXBX8xRLyQSw-AE-jiDmjVwJX32mN5UUEk,576 pytz/zoneinfo/America/Belize,sha256=4i5I2QwrrKxujvw_Siu4Nvmb-hySGAjx-Kpkfyt7WGE,948 pytz/zoneinfo/America/Blanc-Sablon,sha256=tVN5ZPmIO3vc3_ayowg6qbvjheg4OJtDFT9y8IuW334,298 pytz/zoneinfo/America/Boa_Vista,sha256=V4VVOkrFUV1qUfVp9E974IOJFmA5QxQrctatTBEb-hs,632 pytz/zoneinfo/America/Bogota,sha256=ZaQKTZi35AMdlROs0vjEDA_phR8ztJOnjA8aLJZ5tHw,246 pytz/zoneinfo/America/Boise,sha256=Yv4AXa2nSH_oVo3FZqZCR7V7z7c6WnQgKIUyNUpzGXA,2394 pytz/zoneinfo/America/Buenos_Aires,sha256=ntn_GFHadbrFJ4ZuhU6h2uzbFwmDyS9mXV5S28pkGF8,1076 pytz/zoneinfo/America/Cambridge_Bay,sha256=Nanl8yH4SshljhEjDe-PZCYEXbUuuZGmkbAAt2dB-bk,2084 pytz/zoneinfo/America/Campo_Grande,sha256=5BBENR3_8gJp4F_Uj2RRknvRc4JJWNRPnZU9E7tb8QI,1444 pytz/zoneinfo/America/Cancun,sha256=YR2U5T6mDGd5xm8EVA_TM1NwSRMYPNYWvV7wuthnX0I,782 pytz/zoneinfo/America/Caracas,sha256=2NpwXPEtQkI82WCZuQWHXf66VCADcawMpfhKTsuA0x4,264 pytz/zoneinfo/America/Catamarca,sha256=diH1f96kbbY-7gJYQnSCNHs3n9dwHJqUhSdGNx1L7I0,1076 pytz/zoneinfo/America/Cayenne,sha256=atVbW5ChJiKQ_q-3kFs-DLTTZa9ptkiHkmJlq4AXoY4,198 pytz/zoneinfo/America/Cayman,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 pytz/zoneinfo/America/Chicago,sha256=4aZFw-svkMyXmSpNufqzK-xveos-oVJDpEyI8Yu9HQE,3576 pytz/zoneinfo/America/Chihuahua,sha256=cewXJyEw4KCoz33yl8o2tUJZmugBWH4R0Aovdmuqf-o,1484 pytz/zoneinfo/America/Coral_Harbour,sha256=4a94GtPHUdQ-2sdz9WinsKn9V_QiM4XmFj48FTPMeSA,336 pytz/zoneinfo/America/Cordoba,sha256=1XqIP8Qo2bPR7909hrAI-qAttybmwEW4ms7FjZA5Yfw,1076 pytz/zoneinfo/America/Costa_Rica,sha256=74rYa6lrgIkyls9PkHo8SCYl9oOqiuG5S7MWdnJelP4,316 pytz/zoneinfo/America/Creston,sha256=dNOa71QgQ2d5uh7cl-xZme-8u3nMR9GJ7PSktWIDORQ,208 pytz/zoneinfo/America/Cuiaba,sha256=M0FsR8T9s4jFSuzD8Qi6pqtb6Rf2NTzyVHKGZrn56n4,1416 pytz/zoneinfo/America/Curacao,sha256=ZGEIylAZ5iy_rIBsXREtH_ZfWRIkLI9dQjP_EIyn3sY,186 pytz/zoneinfo/America/Danmarkshavn,sha256=YRZAfUCoVtaL1L-MYMYMH1wyOaVQnfUo_gFnvMXSuzw,698 pytz/zoneinfo/America/Dawson,sha256=hhsgSPBS_LCElwD7wIUTR9GDyuNO1_wqMntjJ8WdegQ,1600 pytz/zoneinfo/America/Dawson_Creek,sha256=aJXCyP4j3ggE4wGCN-LrS9hpD_5zWHzQTeSAKTWEPUM,1050 pytz/zoneinfo/America/Denver,sha256=6_yPo1_mvnt9DgpPzr0QdHsjdsfUG6ALnagQLML1DSM,2444 
pytz/zoneinfo/America/Detroit,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 pytz/zoneinfo/America/Dominica,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Edmonton,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 pytz/zoneinfo/America/Eirunepe,sha256=pS90HZzRwH4Tf8ugmKHfiphX7zCPqZkh_0CNb-fEMAM,656 pytz/zoneinfo/America/El_Salvador,sha256=gvGN8Lkj-sGm2_rs8OUjAMf1oMtKp2Xes6UfWT0WqgU,224 pytz/zoneinfo/America/Ensenada,sha256=OHHtvy3J70z6wvKBHgPqMEnGs6SXp8fkf0WX9ZiOODk,2342 pytz/zoneinfo/America/Fort_Nelson,sha256=erfODr3DrSpz65kAdO7Ts2dGbZxvddEP6gx4BX3y2J0,2240 pytz/zoneinfo/America/Fort_Wayne,sha256=GrNub1_3Um5Qh67wOx58_TEAz4fwAeAlk2AlMTVA_sI,1666 pytz/zoneinfo/America/Fortaleza,sha256=mITuMrRLRTWyoiF04Oy_UZ8gxZofTpXDblM8t7ch7Sg,716 pytz/zoneinfo/America/Glace_Bay,sha256=G8DGLGCapH_aYCF_OhaL5Qonf7FOAgAPwelO5htCWBc,2192 pytz/zoneinfo/America/Godthab,sha256=FtlXWP_hBNuwBHkI2b1yne_tSUJpwLtWLyTHZoFZkmM,1878 pytz/zoneinfo/America/Goose_Bay,sha256=JgaLueghSvX2g725FOfIgpgvsqxZGykWOhAZWGpQZRY,3210 pytz/zoneinfo/America/Grand_Turk,sha256=ds3WfGxsBby0eTHik_PEHmmwS0uF-Rd5YdCcb_SAnLw,1848 pytz/zoneinfo/America/Grenada,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Guadeloupe,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Guatemala,sha256=dugUgCd6QY52yHkHuUP4jRWzo5x439IQigaYCvEF46Q,280 pytz/zoneinfo/America/Guayaquil,sha256=PbcF4bvGAm-aFwdtGPotJy3kb4NwoyWwxgwL98BeUWA,246 pytz/zoneinfo/America/Guyana,sha256=mDyb0FtGOpwGPq864vAHX22LY0Pxex94f1wVMyo36d0,236 pytz/zoneinfo/America/Halifax,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 pytz/zoneinfo/America/Havana,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 pytz/zoneinfo/America/Hermosillo,sha256=9Ij30JYmMscC1XHi4o9v-uSXoUuE8V9zhGz2iV5hVFI,416 pytz/zoneinfo/America/Indiana/Indianapolis,sha256=GrNub1_3Um5Qh67wOx58_TEAz4fwAeAlk2AlMTVA_sI,1666 pytz/zoneinfo/America/Indiana/Knox,sha256=BiALShjiOLg1o8mMRWJ1jyTlJkgvwzte7B9WSOvTUNg,2428 pytz/zoneinfo/America/Indiana/Marengo,sha256=CPYY3XgJFNEzONxei7x04wOGI_b86RAn4jBPewi1HZw,1722 pytz/zoneinfo/America/Indiana/Petersburg,sha256=axot1SloP27ZWjezmo7kldu9qA2frEtPVqWngcXtft0,1904 pytz/zoneinfo/America/Indiana/Tell_City,sha256=GrWNjb1i4sbIYlJ8fU0viJ2Q5JmrlvLgcLQILnk3El8,1684 pytz/zoneinfo/America/Indiana/Vevay,sha256=GGosHbQUoIDOKPZxdal42X40veEITMmrnlKOnLUhb-c,1414 pytz/zoneinfo/America/Indiana/Vincennes,sha256=gh7LAbHbMD92eo9C_c5IiwQ1fJvxhdJN402Q_4YJdLg,1694 pytz/zoneinfo/America/Indiana/Winamac,sha256=yS-_aKSC4crd0WdNutkHRHxUjmBCU56QVQcqy7kYpbQ,1778 pytz/zoneinfo/America/Indianapolis,sha256=GrNub1_3Um5Qh67wOx58_TEAz4fwAeAlk2AlMTVA_sI,1666 pytz/zoneinfo/America/Inuvik,sha256=MU_oDiidQaijt1KV0B5h9LqHoCrJ8ieldD9tsiJiX5o,1894 pytz/zoneinfo/America/Iqaluit,sha256=6PitEMSFWcSb-Io8fvm4oQ_7v39G_qANc6reTjXoZJ0,2032 pytz/zoneinfo/America/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 pytz/zoneinfo/America/Jujuy,sha256=5HR0TlZFifwJ5nLTmg7yWXgCTx9mRhahfs4_Wq70wOY,1048 pytz/zoneinfo/America/Juneau,sha256=k7hxb0aGRnfnE-DBi3LkcjAzRPyAf0_Hw0vVFfjGeb0,2353 pytz/zoneinfo/America/Kentucky/Louisville,sha256=-yqgeeHZdq6oP3_WzVvYOmqV9HQv8y7ZWmc9bzHvJAY,2772 pytz/zoneinfo/America/Kentucky/Monticello,sha256=NJMKjG7jjlRzZhndMPw51bYW0D3jviW2Qbl70YcU0Gg,2352 pytz/zoneinfo/America/Knox_IN,sha256=BiALShjiOLg1o8mMRWJ1jyTlJkgvwzte7B9WSOvTUNg,2428 pytz/zoneinfo/America/Kralendijk,sha256=ZGEIylAZ5iy_rIBsXREtH_ZfWRIkLI9dQjP_EIyn3sY,186 
pytz/zoneinfo/America/La_Paz,sha256=PAGF2VU_QOw2xT1Cqdp2P8Aj9hXMVWlCByV7cvfIQ_k,232 pytz/zoneinfo/America/Lima,sha256=JHDCg95uw6BEu4a4Gfyikm1s8rm8AsYPG8dJxQQNZFs,406 pytz/zoneinfo/America/Los_Angeles,sha256=VOy1PikdjiVdJ7lukVGzwl8uDxV_KYqznkTm5BLEiDM,2836 pytz/zoneinfo/America/Louisville,sha256=-yqgeeHZdq6oP3_WzVvYOmqV9HQv8y7ZWmc9bzHvJAY,2772 pytz/zoneinfo/America/Lower_Princes,sha256=ZGEIylAZ5iy_rIBsXREtH_ZfWRIkLI9dQjP_EIyn3sY,186 pytz/zoneinfo/America/Maceio,sha256=pzjNghmeHhvF4aI3cDq2G_5t71BSNGIbRAF5NmJyDmw,744 pytz/zoneinfo/America/Managua,sha256=xBzF01AHn2E2fD8Qdy-DHFe36UqoeNpKPfChduBKWdk,430 pytz/zoneinfo/America/Manaus,sha256=lp6RlkcXJQ7mSsKqnEgC8svJVrFDJk_16xxvfpNSpK4,604 pytz/zoneinfo/America/Marigot,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Martinique,sha256=fMs80kOU2YFvC0f9y2eje97JeAtTYBamXrnlTunNLzQ,232 pytz/zoneinfo/America/Matamoros,sha256=RlEMOT_zvCLQ8s7TNvRE2PnC4H9JrxO7MGxmfu5xPPI,1390 pytz/zoneinfo/America/Mazatlan,sha256=aIyre-8trAXSHtqxbuu6gDDkWCUjI_SdAKPIjz74M2E,1526 pytz/zoneinfo/America/Mendoza,sha256=5DJiYYeQpcLBR_IoIJtk43IswJeGYawx5GykszuJ-Nw,1076 pytz/zoneinfo/America/Menominee,sha256=Arv9WLbfhNcpRsUjHDU757BEdwlp08Gt30AixG3gZ04,2274 pytz/zoneinfo/America/Merida,sha256=BJQ5mzAT-akb_EA7WqGdNheCorDqLBnDS_4X3YJz0rc,1422 pytz/zoneinfo/America/Metlakatla,sha256=twmieGTVY2V-U8nFxqvx7asYv8GVjeWdLtrOI7UApVI,1423 pytz/zoneinfo/America/Mexico_City,sha256=DSpTe5TT0KBsxGx79Rs7ah-zJpiGOJKwPjztovRN0b4,1584 pytz/zoneinfo/America/Miquelon,sha256=LNbkN87EnZUa41Xizko5VIN55EyQvf5Kk5b5AfNQG8Q,1666 pytz/zoneinfo/America/Moncton,sha256=Wmv-bk9aKKcWWzOpc1UFu67HOfwaIk2Wmh3LgqGctys,3154 pytz/zoneinfo/America/Monterrey,sha256=HA4yn9jQHk9i0PqiB7fSoFdzXtB1DT1cheGRPXrQNdQ,1390 pytz/zoneinfo/America/Montevideo,sha256=4jcgTegK5X8F0yNYzk-3oySZ4U9XQ09UbTJ_mlu8N70,1510 pytz/zoneinfo/America/Montreal,sha256=ggOSzbHkmfgu9wTQzP0MUKsrKMbgveuAeThh1eFl1a0,3494 pytz/zoneinfo/America/Montserrat,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Nassau,sha256=TLJ7tbx0ZIFwvAYgF97DWv9eJ4hmeb5n3RJPWI-uOyM,2258 pytz/zoneinfo/America/New_York,sha256=7AoiEGjr3wV4P7C4Qs35COZqwr2mjNDq7ocpsSPFOM8,3536 pytz/zoneinfo/America/Nipigon,sha256=EGPXcOin8mfzFTkYJm4ICpY7fyE24I2pXg4ejafSMyU,2122 pytz/zoneinfo/America/Nome,sha256=2izM3-P-PqJ9za6MdhzFfMvPFNq7Gim69tAvEwPeY2s,2367 pytz/zoneinfo/America/Noronha,sha256=3R4lLV8jg5SljhC5OVVCk51Y77Efjo6zCe-oppg_FFo,716 pytz/zoneinfo/America/North_Dakota/Beulah,sha256=PHlzEk3wsNXYsfMZZSio7ZfdnyxPFpOhK3dS-1AJKGg,2380 pytz/zoneinfo/America/North_Dakota/Center,sha256=PaM52_JOVMEpVdw5qiOlhkp3qA0xp0d6Z9neOatmLKo,2380 pytz/zoneinfo/America/North_Dakota/New_Salem,sha256=o0xmH1FUh3lVFLtP5Lb9c0PfSyaPTsRvQSQYwnn_yls,2380 pytz/zoneinfo/America/Nuuk,sha256=FtlXWP_hBNuwBHkI2b1yne_tSUJpwLtWLyTHZoFZkmM,1878 pytz/zoneinfo/America/Ojinaga,sha256=cO3V-x_1Q-mpbJgKNd6-WTfxDEHBV1aqS4wzVl5A0Q4,1484 pytz/zoneinfo/America/Panama,sha256=kayA_pdpMcSQ0FjIzotdcf-m1JYfbKE-qcFT8LC8zqA,182 pytz/zoneinfo/America/Pangnirtung,sha256=P9Kw_I-NxcUYJIr1j40jTn9q7F8TPAE_FqXsfLYF86A,2094 pytz/zoneinfo/America/Paramaribo,sha256=Hm5tDwUmnoTrTUPEO4WArfSF74ZjywVEocy4kL51FzA,262 pytz/zoneinfo/America/Phoenix,sha256=nEOwYOnGxENw9zW8m50PGxbtVfTrX3QYAo4x4LgOLfI,328 pytz/zoneinfo/America/Port-au-Prince,sha256=09ZAJd4IOiMpfdpUuF1U44R_hRt6BvpAkFXOnYO9yOM,1434 pytz/zoneinfo/America/Port_of_Spain,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Porto_Acre,sha256=17onkm8P_VgMkErjK9rr0qwNni7qp9tgcUZ93g3ltOs,628 
pytz/zoneinfo/America/Porto_Velho,sha256=ZRfzgGEu26hnl3JPtiZLOSFGj_WBSbOKdiLC1xIyc5c,576 pytz/zoneinfo/America/Puerto_Rico,sha256=hJHlV_-AGoMGUWuMpZRv9fLmghrzFHfrR9fRkcxaZJc,246 pytz/zoneinfo/America/Punta_Arenas,sha256=kpqStczF3X0yK0lwOcxmwbQM8ZV9MrNktm7orJF-EJc,1902 pytz/zoneinfo/America/Rainy_River,sha256=r6kx6lD2IzCdygkj-DKyL2tPSn7k0Zil7PSHCBFKOa0,2122 pytz/zoneinfo/America/Rankin_Inlet,sha256=KpQX97-EuF4MNyxQrtOKP616CK_vjniM-lo14WGVz0c,1892 pytz/zoneinfo/America/Recife,sha256=ijFN2ZzZe5oBYdl8Ag3SwmGjj2JeVYYX2Vo767g2s6I,716 pytz/zoneinfo/America/Regina,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 pytz/zoneinfo/America/Resolute,sha256=VP_u5XsepfSwx7Ou9zjGw2p5Qi10AIA54sP1J2DkppM,1892 pytz/zoneinfo/America/Rio_Branco,sha256=17onkm8P_VgMkErjK9rr0qwNni7qp9tgcUZ93g3ltOs,628 pytz/zoneinfo/America/Rosario,sha256=1XqIP8Qo2bPR7909hrAI-qAttybmwEW4ms7FjZA5Yfw,1076 pytz/zoneinfo/America/Santa_Isabel,sha256=OHHtvy3J70z6wvKBHgPqMEnGs6SXp8fkf0WX9ZiOODk,2342 pytz/zoneinfo/America/Santarem,sha256=Gl_lI3pPZ57UIYXWcmaTpFqWDA5re6bHh1nWs_Z0-Nc,602 pytz/zoneinfo/America/Santiago,sha256=GB14PW0xABV283dXc8qL-nnDW-ViFUR3bne7sg0Aido,2529 pytz/zoneinfo/America/Santo_Domingo,sha256=DKtaEj8fQ92ybITTWU4Bm160S9pzJmUVbjaWRnenxU4,458 pytz/zoneinfo/America/Sao_Paulo,sha256=cO3VGekMGdSf1y4f_UgkpDMRes26-l1oGUoDglIiUQg,1444 pytz/zoneinfo/America/Scoresbysund,sha256=dfHb86egoiNykb3bR3OHXpGFPm_Apck8BLiVTCqVAVc,1916 pytz/zoneinfo/America/Shiprock,sha256=6_yPo1_mvnt9DgpPzr0QdHsjdsfUG6ALnagQLML1DSM,2444 pytz/zoneinfo/America/Sitka,sha256=aiS7Fk37hZpzZ9VkeJQeF-BqTLRC1QOTCgMAJwT8UxA,2329 pytz/zoneinfo/America/St_Barthelemy,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/St_Johns,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 pytz/zoneinfo/America/St_Kitts,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/St_Lucia,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/St_Thomas,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/St_Vincent,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Swift_Current,sha256=RRKOF7vZC8VvYxD8PP4J1_hUPayKBP7Lu80avRkfPDY,560 pytz/zoneinfo/America/Tegucigalpa,sha256=EzOz7ntTlreMq69JZ2CcAb8Ps98V9bUMN480tpPIyw4,252 pytz/zoneinfo/America/Thule,sha256=8xuPRaZU8RgO5ECqFYHYmnHioc81sBOailkVu8Y02i8,1502 pytz/zoneinfo/America/Thunder_Bay,sha256=cJ9lcf2mDZttEx_ttYYoZAJfuGhSsDgNV2PI-ggWdPE,2202 pytz/zoneinfo/America/Tijuana,sha256=OHHtvy3J70z6wvKBHgPqMEnGs6SXp8fkf0WX9ZiOODk,2342 pytz/zoneinfo/America/Toronto,sha256=ggOSzbHkmfgu9wTQzP0MUKsrKMbgveuAeThh1eFl1a0,3494 pytz/zoneinfo/America/Tortola,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Vancouver,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 pytz/zoneinfo/America/Virgin,sha256=17gT2eOVMFKJF_sypwDPudkFwGEijrRfkBU-aK3FL60,148 pytz/zoneinfo/America/Whitehorse,sha256=FJn4DQlDyiQZyIUcCbnrUzRxR1J9BthA7S5g6Kz_CTI,1600 pytz/zoneinfo/America/Winnipeg,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 pytz/zoneinfo/America/Yakutat,sha256=tFwnKbvwhyyn4LNTAn5ye_JWDdxjCerNDt7oOwUwO2M,2305 pytz/zoneinfo/America/Yellowknife,sha256=pfFvC8NEy373KbO6r6ec-Gw_O0D2h64mXU1X1AsUDgE,1966 pytz/zoneinfo/Antarctica/Casey,sha256=vIZdw_xcBjOYXXzwLw6XP_juvNbNf4Jd-cEWhwH7OCY,297 pytz/zoneinfo/Antarctica/Davis,sha256=6PokyOaaISRTN13sisuGgdt5vG5A2YqNooJpfLTb5SQ,297 pytz/zoneinfo/Antarctica/DumontDUrville,sha256=g8HQLY-aN3p6az-04KdHOdZYFnN__-8ltHRuY9eQX-I,194 
pytz/zoneinfo/Antarctica/Macquarie,sha256=gMOjOx2dSBH4KxwGKivcaorkLUvw6ZIHFBPKCPCz9eg,1520 pytz/zoneinfo/Antarctica/Mawson,sha256=9TW1g_z0tk5EfeB7K69VJo8agO7-K9ZxWbiqNKnUZNE,199 pytz/zoneinfo/Antarctica/McMurdo,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 pytz/zoneinfo/Antarctica/Palmer,sha256=DW_DXByXg5MnMZ-w1bNdu8b0lKOYD_EgrPRd5EcyEm4,1418 pytz/zoneinfo/Antarctica/Rothera,sha256=QQI1m1IN4_2e6Bb0z-rOYaOwxp4XjMJDOKM9SFDUPKg,164 pytz/zoneinfo/Antarctica/South_Pole,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 pytz/zoneinfo/Antarctica/Syowa,sha256=VnmdVypdJUhsBw1XuXZEcEQIFmoiqoYcdpl8ht37QgY,165 pytz/zoneinfo/Antarctica/Troll,sha256=3zrh-P_jMCss9GGwHJJHkypZZydq4mkgo_TDqctn3c4,1162 pytz/zoneinfo/Antarctica/Vostok,sha256=6tx86WD3MVGJBCbOJUCoA6YlGwCn2BT4B85Zss0vz4Y,165 pytz/zoneinfo/Arctic/Longyearbyen,sha256=UdCERhj1JYpx3ojmilaRoyVoR4qMA1-PEv6hGwnpsJA,2228 pytz/zoneinfo/Asia/Aden,sha256=rq9KPj8l0FBnnKn93WkMeA1IngNtTzk5_oV4sEZhc4w,165 pytz/zoneinfo/Asia/Almaty,sha256=rBIl_pqZNmKZabjEa4mcsLahl9PbAdZJpQMQLVmcfBU,997 pytz/zoneinfo/Asia/Amman,sha256=bvwhc1hPCGvQMqWzaoCHrCA_y78n3H-Z2t4wHSocuAw,1853 pytz/zoneinfo/Asia/Anadyr,sha256=hDDTly45ejoVVP9Al07TmKpTACNGJaIPlcXLRbsG_4g,1188 pytz/zoneinfo/Asia/Aqtau,sha256=A5exZN256JagFJTcasgdCrQ8giOqZ2EFMRVYBWTaqZA,983 pytz/zoneinfo/Asia/Aqtobe,sha256=LQ7P5LEEe7jbWbjqvzmM79c0o6AdZeCExQS-fOWp8yw,1011 pytz/zoneinfo/Asia/Ashgabat,sha256=L4DYV2mZWycsYeHIypXzO6ZNY3tD8wjgxfPR2ZPW26c,619 pytz/zoneinfo/Asia/Ashkhabad,sha256=L4DYV2mZWycsYeHIypXzO6ZNY3tD8wjgxfPR2ZPW26c,619 pytz/zoneinfo/Asia/Atyrau,sha256=3uEo89ORyDJqQ_TtaQdIf9UPaB8WqIRQVi0geeY9gVE,991 pytz/zoneinfo/Asia/Baghdad,sha256=lQMSUnOuijbcoTaCqMNnYhnvKtS2IVP_kXFAzePVNDU,983 pytz/zoneinfo/Asia/Bahrain,sha256=V0rFJdLHIrToJ5Wl28VzVowwCVZoY8ZZSeNp-7kOvjY,199 pytz/zoneinfo/Asia/Baku,sha256=vhHnliaOdRyNudl0sFJFdLynEg0Hc0I-IiZNfbDeCbM,1227 pytz/zoneinfo/Asia/Bangkok,sha256=eYq0vh89N1j069URoQvtBu0ndEal6FPrtbF8WCKKpDw,199 pytz/zoneinfo/Asia/Barnaul,sha256=2c1Cq8XYlBgybRQMP8w0NCf7kaLDrPZtGn4M5iJZbJo,1221 pytz/zoneinfo/Asia/Beirut,sha256=_Z_2ZAg_iL9vU51JDB8CB04uXBDrf1kLIis-JnXaS2o,2154 pytz/zoneinfo/Asia/Bishkek,sha256=do_4ki1JvSKupUrvlz9jRkHspDhdvk1D2IkByFskjJM,983 pytz/zoneinfo/Asia/Brunei,sha256=BMMjwEmZ9rMoNpWfg8IrlLhRbMKbdW48padRF-FGolc,203 pytz/zoneinfo/Asia/Calcutta,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 pytz/zoneinfo/Asia/Chita,sha256=4ICOcAVAEWnP-cdf_YJu1_kCYnYPG2_vYfSbuNI-VwI,1221 pytz/zoneinfo/Asia/Choibalsan,sha256=sJQAAjiT9VyG73dYhpYkq4tcmfITcPpiAa8YXsDlKag,949 pytz/zoneinfo/Asia/Chongqing,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 pytz/zoneinfo/Asia/Chungking,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 pytz/zoneinfo/Asia/Colombo,sha256=HGea9jswIIgz7k20LTzbKtQyUun67IP5HvsZrmAJZJY,372 pytz/zoneinfo/Asia/Dacca,sha256=3K5llGhcpCdZMMcJuomICVv7lZlDRpU4PUb5DtFx8l4,337 pytz/zoneinfo/Asia/Damascus,sha256=6mcB6bxH1KsLqzb_LmJUT3tUDnq9_ScLFKoMFkcZy3A,2294 pytz/zoneinfo/Asia/Dhaka,sha256=3K5llGhcpCdZMMcJuomICVv7lZlDRpU4PUb5DtFx8l4,337 pytz/zoneinfo/Asia/Dili,sha256=ptjbacc9JK0pv2JpD-gHMglrwYNj9LMMIua0U0ZTMUc,227 pytz/zoneinfo/Asia/Dubai,sha256=-ga0m3ua9Y6kSWREz2_VdtcVAkq83WrW3vxjBI7WNGs,165 pytz/zoneinfo/Asia/Dushanbe,sha256=FUk9Tt_GimfRulcWamEvuOvA7FQ52YfZqQ2w88qMx6M,591 pytz/zoneinfo/Asia/Famagusta,sha256=CFrcygd8ude5x6OEtfM_Dw0KYHoxpPPzq46KoHVxjjc,2028 pytz/zoneinfo/Asia/Gaza,sha256=VQl9UGxRirFOqw91pba2Ukner-_lKv1lu_OZ77g17kc,2316 pytz/zoneinfo/Asia/Harbin,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 
pytz/zoneinfo/Asia/Hebron,sha256=biIIT_LY-UdgdNsF-inW4A8CPMGtyggJFc4iaq_0xsM,2344 pytz/zoneinfo/Asia/Ho_Chi_Minh,sha256=L5TXNg6-odIIn-JAyLTR8fKFiUFBNFwy0HzwZchbnm4,351 pytz/zoneinfo/Asia/Hong_Kong,sha256=UcnFEc9S8hMWl9giVXni4TAhLPWX0H12XvwSt4AJHew,1203 pytz/zoneinfo/Asia/Hovd,sha256=JUnOos7PNTi2VRKxD6XnaVR3NpuhsX_Pi18rIzVe1xw,891 pytz/zoneinfo/Asia/Irkutsk,sha256=iUJZCVBjpfB4rNKJOr6g0zUZtccYYk_Gk0wTklx8Yj0,1243 pytz/zoneinfo/Asia/Istanbul,sha256=2S0A_f7VxvyErJMMCPqK33AChA29IVkMr1o-SpMtMxk,1947 pytz/zoneinfo/Asia/Jakarta,sha256=_WRgz6Zb6wxIXtMwpKjG4w4PJtDRzkhdrw-3a4NCBFA,355 pytz/zoneinfo/Asia/Jayapura,sha256=ihzUd-L8HUVqG-Na10MyPE-YYwjVFj-xerqjTN4EJZs,221 pytz/zoneinfo/Asia/Jerusalem,sha256=xpEJ_vI7aMV0iFD5BN9sq71vlYUZj5Q613TUUwZl1Ww,2288 pytz/zoneinfo/Asia/Kabul,sha256=ial7SvweHTQXDl79MnXm6QHtiw2i7Zt1e5urLXU8Sq8,208 pytz/zoneinfo/Asia/Kamchatka,sha256=pBA0RbynKTKsMCmf2hJMZ_hgVUPemms-VceMMJ7QC64,1166 pytz/zoneinfo/Asia/Karachi,sha256=iB-mWMTXUyfBwAkZdz8_UmEw0xsgxIub-KNI7akzhkk,379 pytz/zoneinfo/Asia/Kashgar,sha256=AEXDJ5PxQOhePZZw1QZl98moDNa-bW3I3WVNQZHBPYA,165 pytz/zoneinfo/Asia/Kathmandu,sha256=TUeW7rDSifOTSsNxvo9igIYZfGITEZUf-0EjglyRDWs,212 pytz/zoneinfo/Asia/Katmandu,sha256=TUeW7rDSifOTSsNxvo9igIYZfGITEZUf-0EjglyRDWs,212 pytz/zoneinfo/Asia/Khandyga,sha256=XYzE2tsE5Say9pg0cHDQkEE9aTuy2piFSLAGx_d-dmM,1271 pytz/zoneinfo/Asia/Kolkata,sha256=6Qw0EDbLcgMgDik8s7UTJn4QSjmllPNeGVJU5rwKF88,285 pytz/zoneinfo/Asia/Krasnoyarsk,sha256=nzRw4PI2AiK_Ge854b8U7TSDw0LGQy3ca5YuOOU2XwI,1207 pytz/zoneinfo/Asia/Kuala_Lumpur,sha256=RfiIYo6sMEkSA8m5iUmyOyJzKZrgRs8ehGuDZwoq88k,383 pytz/zoneinfo/Asia/Kuching,sha256=KsAtQ0aocINozixwW7CkorY-1PTLlsj7UUnQGQMEYTQ,483 pytz/zoneinfo/Asia/Kuwait,sha256=rq9KPj8l0FBnnKn93WkMeA1IngNtTzk5_oV4sEZhc4w,165 pytz/zoneinfo/Asia/Macao,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 pytz/zoneinfo/Asia/Macau,sha256=MvAkRyRsrA2r052ItlyF5bh2FheRjI0jPwg0uIiH2Yk,1227 pytz/zoneinfo/Asia/Magadan,sha256=cqwjKQt8TlznM1w2DezAZuz1EjeOfLxPeSY19i9zkfQ,1222 pytz/zoneinfo/Asia/Makassar,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 pytz/zoneinfo/Asia/Manila,sha256=ujfq0kl1EhxcYSOrG-FS750aNaYUt1TT4bFuK4EcL_c,328 pytz/zoneinfo/Asia/Muscat,sha256=-ga0m3ua9Y6kSWREz2_VdtcVAkq83WrW3vxjBI7WNGs,165 pytz/zoneinfo/Asia/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 pytz/zoneinfo/Asia/Novokuznetsk,sha256=vQGcqKdmYmWDdl73QPZTcyadnope1RPJ4oBgZelQu90,1165 pytz/zoneinfo/Asia/Novosibirsk,sha256=ApL3s20HX2eIAno03HCa2RXdlLotVb9JvnZl7W1sM00,1221 pytz/zoneinfo/Asia/Omsk,sha256=wxbEesfe7dJOkNPffqTwT6wuTSSTM6E9f0uFMAyzMCM,1207 pytz/zoneinfo/Asia/Oral,sha256=iMjqD4LvDgyxN15v7CqyEdBDyBFaOlChwX1wHz2JiVQ,1005 pytz/zoneinfo/Asia/Phnom_Penh,sha256=eYq0vh89N1j069URoQvtBu0ndEal6FPrtbF8WCKKpDw,199 pytz/zoneinfo/Asia/Pontianak,sha256=inOXwuKtjKv1z_eliPZSIqjSt6whtuxhPeG1YpjU_BQ,353 pytz/zoneinfo/Asia/Pyongyang,sha256=_-g3GnDAtfDX4XAktXH9jFouLUDmOovnjoOfvRpUDsE,237 pytz/zoneinfo/Asia/Qatar,sha256=V0rFJdLHIrToJ5Wl28VzVowwCVZoY8ZZSeNp-7kOvjY,199 pytz/zoneinfo/Asia/Qostanay,sha256=UGYEvmZfAAS9D6EMGd0n6-r_Az_zgTDSWLPeHzFLfu0,1011 pytz/zoneinfo/Asia/Qyzylorda,sha256=aiSRxwoUbQ-TBHf2wcyaOhQb86j3jQpXwcQaSPnAtwU,1025 pytz/zoneinfo/Asia/Rangoon,sha256=ZHuX-XVHr8dGJjrPQ5cW7b8jQUv3ihyd-VzN545mlMA,268 pytz/zoneinfo/Asia/Riyadh,sha256=rq9KPj8l0FBnnKn93WkMeA1IngNtTzk5_oV4sEZhc4w,165 pytz/zoneinfo/Asia/Saigon,sha256=L5TXNg6-odIIn-JAyLTR8fKFiUFBNFwy0HzwZchbnm4,351 pytz/zoneinfo/Asia/Sakhalin,sha256=95AdPwOgSe0g9wdx67kKLDbjvY3FtpeVBoAWbJVco0w,1202 
pytz/zoneinfo/Asia/Samarkand,sha256=BBe6Gg_KlSQuS5hAyvvhZWmClcLJaFjnCNGC391HHQM,577 pytz/zoneinfo/Asia/Seoul,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 pytz/zoneinfo/Asia/Shanghai,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 pytz/zoneinfo/Asia/Singapore,sha256=hIgr_LHMTWh3GgeG-MmLHBp-9anUxQcfMlKFtX8WvmU,383 pytz/zoneinfo/Asia/Srednekolymsk,sha256=0DllW8q5VgXEMV5c_nLJElZsNpauvNhNACQpcgdqEl0,1208 pytz/zoneinfo/Asia/Taipei,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 pytz/zoneinfo/Asia/Tashkent,sha256=LS-yTxh0v1vmJoQ9I6fY-IERk7ukPmovVx2Ut_-b-Ig,591 pytz/zoneinfo/Asia/Tbilisi,sha256=w6UNxgyn4BVVTF5WkAtxo_u7nnIY26makKQ5nRgifds,1035 pytz/zoneinfo/Asia/Tehran,sha256=ATT50Q0hK6uSba5_WnOE3Px0OWxIwxaqK5Oi10P2A-M,2582 pytz/zoneinfo/Asia/Tel_Aviv,sha256=xpEJ_vI7aMV0iFD5BN9sq71vlYUZj5Q613TUUwZl1Ww,2288 pytz/zoneinfo/Asia/Thimbu,sha256=uia8or5dtDkxVUZrcLwkjbTz9C7ZhLq0T4jlE4YvuvQ,203 pytz/zoneinfo/Asia/Thimphu,sha256=uia8or5dtDkxVUZrcLwkjbTz9C7ZhLq0T4jlE4YvuvQ,203 pytz/zoneinfo/Asia/Tokyo,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 pytz/zoneinfo/Asia/Tomsk,sha256=77YgdJLxETRKjQjnaHHf54xBAqNywTDwQQmZ5v6Aq28,1221 pytz/zoneinfo/Asia/Ujung_Pandang,sha256=OhJtCqSTEU-u5n0opBVO5Bu-wQzcYPy9S_6aAhJXgOw,254 pytz/zoneinfo/Asia/Ulaanbaatar,sha256=uyQSzIBl0f2TXHrmUm3VPs1C9ro013hYmAlx6yUjh3Y,891 pytz/zoneinfo/Asia/Ulan_Bator,sha256=uyQSzIBl0f2TXHrmUm3VPs1C9ro013hYmAlx6yUjh3Y,891 pytz/zoneinfo/Asia/Urumqi,sha256=AEXDJ5PxQOhePZZw1QZl98moDNa-bW3I3WVNQZHBPYA,165 pytz/zoneinfo/Asia/Ust-Nera,sha256=JAZhRAPdbOL9AL-WHOL8aZjxdZxLmGDNBGMCw9TKtR8,1252 pytz/zoneinfo/Asia/Vientiane,sha256=eYq0vh89N1j069URoQvtBu0ndEal6FPrtbF8WCKKpDw,199 pytz/zoneinfo/Asia/Vladivostok,sha256=Wokhgtj2nwUj992h7SyfB_fRNHAKfPNzhsf_oZpim8c,1208 pytz/zoneinfo/Asia/Yakutsk,sha256=RVCIl52EvMrp2RG2hg2cjDSr9QhsscaAT-NV81xw7zc,1207 pytz/zoneinfo/Asia/Yangon,sha256=ZHuX-XVHr8dGJjrPQ5cW7b8jQUv3ihyd-VzN545mlMA,268 pytz/zoneinfo/Asia/Yekaterinburg,sha256=NzVc2DiPeyw0FdMHwSPQJF9k3tvWdtrETZiN58pyxLk,1243 pytz/zoneinfo/Asia/Yerevan,sha256=k0WHtWQW_cBCjcEv8nP01cVPeTVDlf18lQ0_u6cin1o,1151 pytz/zoneinfo/Atlantic/Azores,sha256=ut7TdE-xiQNjRybg56Tt5b7Zo5zqbuF5IFci2aDMs1Q,3484 pytz/zoneinfo/Atlantic/Bermuda,sha256=164Ap4_hmYOtKX9guV1DrlhSH9LSnMiHEo6vpS8faSw,1978 pytz/zoneinfo/Atlantic/Canary,sha256=ymK9ufqphvNjDK3hzikN4GfkcR3QeCBiPKyVc6FjlbA,1897 pytz/zoneinfo/Atlantic/Cape_Verde,sha256=ESQvE3deMI-lx9mG0yJLEsFX5KRl-7c6gD5O2h0Zm9Q,270 pytz/zoneinfo/Atlantic/Faeroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 pytz/zoneinfo/Atlantic/Faroe,sha256=NibdZPZtapnYR_myIZnMdTaSKGsOBGgujj0_T2NvAzs,1815 pytz/zoneinfo/Atlantic/Jan_Mayen,sha256=UdCERhj1JYpx3ojmilaRoyVoR4qMA1-PEv6hGwnpsJA,2228 pytz/zoneinfo/Atlantic/Madeira,sha256=e1K2l8ykd8xpznQNs3SSuIZ1ZfVx2Y69EXrhvYV3P14,3475 pytz/zoneinfo/Atlantic/Reykjavik,sha256=mSkaRBGZLeUrm88EeHcaWnEd35Wn-Ag2G10HtI3G2fg,1162 pytz/zoneinfo/Atlantic/South_Georgia,sha256=QZ72fRKp6Kgvy7DfyHGht1MVnzGgSPujLQd4XMjNrrc,164 pytz/zoneinfo/Atlantic/St_Helena,sha256=0u-sTl8j2IyV1ywdtCgHFw9S9D3ZiiBa9akqkbny2Zc,148 pytz/zoneinfo/Atlantic/Stanley,sha256=exKMLw-P952wS1FTxVjnUU1mkD2OvKUDwtDt8IGgf8w,1214 pytz/zoneinfo/Australia/ACT,sha256=lXwgrHWvytX_hJuWsHPSFPNObHReMZ-36D3e_QXMVhk,2204 pytz/zoneinfo/Australia/Adelaide,sha256=96rGNE9D_GsPI9WH0M8eH4AYG__5B5ZlgQk_rLOfY80,2222 pytz/zoneinfo/Australia/Brisbane,sha256=r_riK-wbS5fRi0eZxkUQre1nHTQ_q7dMwZ5CRVhS9vI,433 pytz/zoneinfo/Australia/Broken_Hill,sha256=kdigsOjUvUIi4Tyq2lcZnvSHGoWTLh6J3-PeE5MRaQc,2243 
pytz/zoneinfo/Australia/Canberra,sha256=lXwgrHWvytX_hJuWsHPSFPNObHReMZ-36D3e_QXMVhk,2204 pytz/zoneinfo/Australia/Currie,sha256=R0oL8IXO87BcbO57nR5d-H21I2ndxQSPsgMYlgR-qhc,2204 pytz/zoneinfo/Australia/Darwin,sha256=rOoBtYLkk7aeoshCOihPeseuxM184yp7uhSHbdT0FoU,304 pytz/zoneinfo/Australia/Eucla,sha256=MmcY-HkzU0mccRVN0GSXSZ072x7NanzSS3dDdIjLRl4,484 pytz/zoneinfo/Australia/Hobart,sha256=Rw57MtxlcRbn-ZszuSjgxMHmpSLLQThAXeqL9l9TvXw,2316 pytz/zoneinfo/Australia/LHI,sha256=Luf0Lx_iJHuh3kZd4LxRjf36tLF5-wW2UFMVNKNT7gg,1860 pytz/zoneinfo/Australia/Lindeman,sha256=jkXejV1-5ZLpMTj450TAwKcMPZtuaoKLcSugLsunqBs,489 pytz/zoneinfo/Australia/Lord_Howe,sha256=Luf0Lx_iJHuh3kZd4LxRjf36tLF5-wW2UFMVNKNT7gg,1860 pytz/zoneinfo/Australia/Melbourne,sha256=MimH3imrwSUOLJNjIsfQbc5I_6kU6H-VEL8humwNHFk,2204 pytz/zoneinfo/Australia/NSW,sha256=lXwgrHWvytX_hJuWsHPSFPNObHReMZ-36D3e_QXMVhk,2204 pytz/zoneinfo/Australia/North,sha256=rOoBtYLkk7aeoshCOihPeseuxM184yp7uhSHbdT0FoU,304 pytz/zoneinfo/Australia/Perth,sha256=d0oXb77ElK6sKmU7Q-Lsmff0bz6Uk7X3hFMverH2InM,460 pytz/zoneinfo/Australia/Queensland,sha256=r_riK-wbS5fRi0eZxkUQre1nHTQ_q7dMwZ5CRVhS9vI,433 pytz/zoneinfo/Australia/South,sha256=96rGNE9D_GsPI9WH0M8eH4AYG__5B5ZlgQk_rLOfY80,2222 pytz/zoneinfo/Australia/Sydney,sha256=lXwgrHWvytX_hJuWsHPSFPNObHReMZ-36D3e_QXMVhk,2204 pytz/zoneinfo/Australia/Tasmania,sha256=Rw57MtxlcRbn-ZszuSjgxMHmpSLLQThAXeqL9l9TvXw,2316 pytz/zoneinfo/Australia/Victoria,sha256=MimH3imrwSUOLJNjIsfQbc5I_6kU6H-VEL8humwNHFk,2204 pytz/zoneinfo/Australia/West,sha256=d0oXb77ElK6sKmU7Q-Lsmff0bz6Uk7X3hFMverH2InM,460 pytz/zoneinfo/Australia/Yancowinna,sha256=kdigsOjUvUIi4Tyq2lcZnvSHGoWTLh6J3-PeE5MRaQc,2243 pytz/zoneinfo/Brazil/Acre,sha256=17onkm8P_VgMkErjK9rr0qwNni7qp9tgcUZ93g3ltOs,628 pytz/zoneinfo/Brazil/DeNoronha,sha256=3R4lLV8jg5SljhC5OVVCk51Y77Efjo6zCe-oppg_FFo,716 pytz/zoneinfo/Brazil/East,sha256=cO3VGekMGdSf1y4f_UgkpDMRes26-l1oGUoDglIiUQg,1444 pytz/zoneinfo/Brazil/West,sha256=lp6RlkcXJQ7mSsKqnEgC8svJVrFDJk_16xxvfpNSpK4,604 pytz/zoneinfo/CET,sha256=o4omkrM_IsITxooUo8krM921XfBdvRs9JhwGXGd-Ypg,2094 pytz/zoneinfo/CST6CDT,sha256=WGbtZ1FwjRX6Jeo_TCXKsfeDs4V9uhXGJfcnLJhk3s0,2310 pytz/zoneinfo/Canada/Atlantic,sha256=TZpmc5PwWoLfTfQoQ_b3U17BE2iVKSeNkR0Ho8mbTn8,3424 pytz/zoneinfo/Canada/Central,sha256=7P-_YQrneFcon7QKSTOnkiGjEppFDn3Z48MJ1qq8VBw,2868 pytz/zoneinfo/Canada/Eastern,sha256=ggOSzbHkmfgu9wTQzP0MUKsrKMbgveuAeThh1eFl1a0,3494 pytz/zoneinfo/Canada/Mountain,sha256=-TkIfc3QlvaCf0p8COZ43Y1HRBAl-nARUi-JdXeK1vE,2332 pytz/zoneinfo/Canada/Newfoundland,sha256=r1-17uKv27eZ3JsVkw_DLZQbo6wvjuuVu7C2pDsmOgI,3655 pytz/zoneinfo/Canada/Pacific,sha256=sknKH0jSPWam-DHfM35qXs8Nam7d5TFlkUI9Sgxryyg,2892 pytz/zoneinfo/Canada/Saskatchewan,sha256=yjqT08pHbICYe83H8JmtaDBvCFqRv7Tfze3Y8xuXukw,980 pytz/zoneinfo/Canada/Yukon,sha256=FJn4DQlDyiQZyIUcCbnrUzRxR1J9BthA7S5g6Kz_CTI,1600 pytz/zoneinfo/Chile/Continental,sha256=GB14PW0xABV283dXc8qL-nnDW-ViFUR3bne7sg0Aido,2529 pytz/zoneinfo/Chile/EasterIsland,sha256=paHp1QRXIa02kgd0-4V6vWXdqcwheow-hJQD9VqacfQ,2233 pytz/zoneinfo/Cuba,sha256=HUQeAuKBsEkI5SLZjqynXICOUVOajkKzKH5r-Ov5Odc,2416 pytz/zoneinfo/EET,sha256=gGVsW5-qnI7ty8vqVK1ADWhunrvAT8kUC79GUf-_7G8,1908 pytz/zoneinfo/EST,sha256=uKE_VPKfxGyYEsyqV_DdE2MW55vs_qUioOdIn5Goobc,114 pytz/zoneinfo/EST5EDT,sha256=fwzEMT1jgnY2dDjd0EqDl26_7LC-oF48Bd4ng5311H0,2310 pytz/zoneinfo/Egypt,sha256=L6zLQLnQtLkEELOGfm6USaHY33qAEPgGV822-iU1vxc,1955 pytz/zoneinfo/Eire,sha256=-JSA3vsi44F1DE8supVjSppH2Vpp12WjJI0_COtAmqU,3492 pytz/zoneinfo/Etc/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 
pytz/zoneinfo/Etc/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/Etc/GMT+1,sha256=1Qzl2X9rQ_RXEf11yH09wQZCr_ph6UdFP7E0yu9s-IQ,116 pytz/zoneinfo/Etc/GMT+10,sha256=JEQyQyQlkC0o6ZTdeVjZhCIOh6cK5TF7H00Pkls-sUI,117 pytz/zoneinfo/Etc/GMT+11,sha256=tWvcvYMFCaE60nJVvDrrov7stJvs1KQYOyrhl3dzcUs,117 pytz/zoneinfo/Etc/GMT+12,sha256=b70HEhErq8IJmq8x7cOZy4eR__3fq5uHHpjvPBEHqMA,117 pytz/zoneinfo/Etc/GMT+2,sha256=T6Ep5zhslBKbYaECFUB6gUKh3iTZPyMoW1kjhonxrUo,116 pytz/zoneinfo/Etc/GMT+3,sha256=QGoYrE04bUJ-OzL37dt2MZT5FxWNLpJDPVXgJbstYZA,116 pytz/zoneinfo/Etc/GMT+4,sha256=RWrkNki-wV7X-coe0VvufBe6LrWVpkPJgia5QQYEnBo,116 pytz/zoneinfo/Etc/GMT+5,sha256=oRmeC41dgYXT-zzyZIRKXN9IvdL2Da5nTuwmG2_prIA,116 pytz/zoneinfo/Etc/GMT+6,sha256=d6dAnwiejyFI2n7AzFlFW0aFAT6zYNEjBIEG0uu0sbQ,116 pytz/zoneinfo/Etc/GMT+7,sha256=TqjYbzd0YHpx1wisFg08J19wTpg6ztJLLongZY_lozs,116 pytz/zoneinfo/Etc/GMT+8,sha256=th_8bIMmYgRPCesBrbmBhRr0jQO7whd70LiY9HfwJyk,116 pytz/zoneinfo/Etc/GMT+9,sha256=Qq5E6iUS7JMJIymT7YoqlI8MtqtVy0mr9t6zWFtWc9Y,116 pytz/zoneinfo/Etc/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/Etc/GMT-1,sha256=73F1eU8uAQGP3mcoB2q99CjfManGFHk3fefljp9pYC4,117 pytz/zoneinfo/Etc/GMT-10,sha256=fKWWNwLBOp1OkKjtc1w9LIXJR1mTTD-JdvYflRy1IrU,118 pytz/zoneinfo/Etc/GMT-11,sha256=D2S79n6psa9t9_2vj5wIrFpHH2OJLcCKP6vtwzFZINY,118 pytz/zoneinfo/Etc/GMT-12,sha256=me4V6lmWI8gSr8H7N41WAD0Eww1anh_EF34Qr9UoSnI,118 pytz/zoneinfo/Etc/GMT-13,sha256=xbmbG1BQA6Dlpa_iUwEGyJxW4a3t6lmawdPKAE8vbR8,118 pytz/zoneinfo/Etc/GMT-14,sha256=PpXoREBh02qFpvxVMj2pV9IAzSQvBE7XPvnN9qSZ-Kc,118 pytz/zoneinfo/Etc/GMT-2,sha256=ve6hWLdeuiLhqagaWLqMD6HNybS1chRwjudfTZ2bYBE,117 pytz/zoneinfo/Etc/GMT-3,sha256=N77jILanuLDVkLsdujXZSu-dsHiwN5MIpwh7fMUifso,117 pytz/zoneinfo/Etc/GMT-4,sha256=LSko5fVHqPl5zfwjGqkbMa_OFnvtpT6o_4xYxNz9n5o,117 pytz/zoneinfo/Etc/GMT-5,sha256=uLaSR5Mb18HRTsAA5SveY9PAJ97dO8QzIWqNXe3wZb4,117 pytz/zoneinfo/Etc/GMT-6,sha256=JSN-RUAphJ50fpIv7cYC6unrtrz9S1Wma-piDHlGe7c,117 pytz/zoneinfo/Etc/GMT-7,sha256=vVAOF8xU9T9ESnw68c0SFXpcvkoopaiwTR0zbefHHSU,117 pytz/zoneinfo/Etc/GMT-8,sha256=S7xFQbFMpiDZy4v5L4D9fCrjRIzzoLC5p8Se23xi7us,117 pytz/zoneinfo/Etc/GMT-9,sha256=I5vHNmUK-Yyg_S1skFN44VGVzBgktjFgVQiDIKO4aMI,117 pytz/zoneinfo/Etc/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/Etc/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/Etc/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/Etc/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/Etc/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/Etc/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/Europe/Amsterdam,sha256=pw8HngVt3bU5QrRzu70qOmf69TIyklkglvVUte9ntKo,2910 pytz/zoneinfo/Europe/Andorra,sha256=gTB5jCQmvIw3JJi1_vAcOYuhtzPBR6RXUx9gVV6p6ug,1742 pytz/zoneinfo/Europe/Astrakhan,sha256=ywtzL92KVfoybOmAhE9eHqmMcvJZm5b0js5GDdWIJEQ,1165 pytz/zoneinfo/Europe/Athens,sha256=XDY-FBUddRyQHN8GxQLZ4awjuOlWlzlUdjv7OdXFNzA,2262 pytz/zoneinfo/Europe/Belfast,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/Europe/Belgrade,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/Berlin,sha256=XuR19xoPwaMvrrhJ-MOcbnqmbW1B7HQrl7OnQ2s7BwE,2298 pytz/zoneinfo/Europe/Bratislava,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 pytz/zoneinfo/Europe/Brussels,sha256=gS9Vrrbozend9HhuFetCVrIegs9fXSjaG60X2UVwysA,2933 pytz/zoneinfo/Europe/Bucharest,sha256=nfg6-bU2D6DMEWb9EMIBR5kxnNsbDSx0UKfHH_ZzqFc,2184 
pytz/zoneinfo/Europe/Budapest,sha256=J2tBUmArS5llMhfeILd3UGELv1Bup7DMsh4lX3qhqy4,2368 pytz/zoneinfo/Europe/Busingen,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 pytz/zoneinfo/Europe/Chisinau,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 pytz/zoneinfo/Europe/Copenhagen,sha256=q7iAbkd7y9QvbAi6XGZEUOTwNDCRYWRu9VQCxUrZ01U,2137 pytz/zoneinfo/Europe/Dublin,sha256=-JSA3vsi44F1DE8supVjSppH2Vpp12WjJI0_COtAmqU,3492 pytz/zoneinfo/Europe/Gibraltar,sha256=egOcazf2u1njGZ0tDj-f1NzZT_K5rpUKSqtShxO7U6c,3052 pytz/zoneinfo/Europe/Guernsey,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/Europe/Helsinki,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 pytz/zoneinfo/Europe/Isle_of_Man,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/Europe/Istanbul,sha256=2S0A_f7VxvyErJMMCPqK33AChA29IVkMr1o-SpMtMxk,1947 pytz/zoneinfo/Europe/Jersey,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/Europe/Kaliningrad,sha256=s7GXSe1YvMcs7AiUhHNTA6I4nAOQn_Kmz_ZqJYO-LMM,1493 pytz/zoneinfo/Europe/Kiev,sha256=iVkTPFkl2tADYapa1HASlaV3tT2VsJpTPTTJC_9HtAk,2088 pytz/zoneinfo/Europe/Kirov,sha256=Sr4HEUwk3tPTXioeCLhvlgKbCAFU7Gy2UB3f--uWLDc,1153 pytz/zoneinfo/Europe/Lisbon,sha256=L6n3snx6pNHHJIL6JOLFOAlYkQ2J5uB_y5MG_Ic_PDU,3469 pytz/zoneinfo/Europe/Ljubljana,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/London,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/Europe/Luxembourg,sha256=974Dvf_X1QISKG1zIiTJJIfGavobO21HUVS-HfysOcY,2946 pytz/zoneinfo/Europe/Madrid,sha256=MTTMnrbDDtexRikd72-FbQEpCZjc63_UtBIiDomD95c,2614 pytz/zoneinfo/Europe/Malta,sha256=xRwBfrV8hOihGtqcek5_B6l5hjc206g3yfbEWXIaUis,2620 pytz/zoneinfo/Europe/Mariehamn,sha256=GEkB7LsVhmegt7YuuWheCDvDGC7b7Nw9bTdDGS9qkJc,1900 pytz/zoneinfo/Europe/Minsk,sha256=mn86zdrNWpJYDfE51Iy9n1-Zi2piTyb9EPaS2A-uGJQ,1321 pytz/zoneinfo/Europe/Monaco,sha256=8DHr1ymf4c5sZKAzLBd4GhXsTZUXMOYUKhhVmmGRdrs,2944 pytz/zoneinfo/Europe/Moscow,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 pytz/zoneinfo/Europe/Nicosia,sha256=0Unm0IFT7HyGeQ7F3vTa_-klfysCgrulqFO6BD1plZU,2002 pytz/zoneinfo/Europe/Oslo,sha256=UdCERhj1JYpx3ojmilaRoyVoR4qMA1-PEv6hGwnpsJA,2228 pytz/zoneinfo/Europe/Paris,sha256=wMOaHPLy0KwYfPxMbNq_B8U21RctvO0go5jhc0TlXCQ,2962 pytz/zoneinfo/Europe/Podgorica,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/Prague,sha256=G9fdhUXmzx651BnyZ6V7AOYIV9EV5aMJMm44eJaLLZw,2301 pytz/zoneinfo/Europe/Riga,sha256=hJ2_0m1taW9IuA-hMyP5n-WX7YOrR0heKszJhgljRWk,2198 pytz/zoneinfo/Europe/Rome,sha256=-X5F_d3Dz0kBRWiUTXUN-fgeCHbUEHLaaHIwEPZEdUQ,2641 pytz/zoneinfo/Europe/Samara,sha256=z2innqSZ8_lkEy8cIyF9JM_FfnO2sWZaqeFqOh8pD7M,1215 pytz/zoneinfo/Europe/San_Marino,sha256=-X5F_d3Dz0kBRWiUTXUN-fgeCHbUEHLaaHIwEPZEdUQ,2641 pytz/zoneinfo/Europe/Sarajevo,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/Saratov,sha256=BMej49HlQG24CWCh5VOENrB3jPuJPScPszRtb7MrJ3I,1183 pytz/zoneinfo/Europe/Simferopol,sha256=_M6LXB5Rqh932nKIJotGjT8YNszAOb7RjHN5ng-uW1Y,1453 pytz/zoneinfo/Europe/Skopje,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/Sofia,sha256=hCQKXfMNrnA5xHNw_uzTjKzVw4-Bvsq5oGO4yUCv5tY,2077 pytz/zoneinfo/Europe/Stockholm,sha256=Xgp4GSh8-pzdeJeP8TQ20jWDDUj17R69h6RYTbLYd2g,1909 pytz/zoneinfo/Europe/Tallinn,sha256=4a6JC0aIpMzqIV7O35zoG0LLJwkQq5AoXZ2ivkic6-w,2148 pytz/zoneinfo/Europe/Tirane,sha256=ztlZyCS9WCXeVW8nBun3Tyi5HUY0EtFbiBbEc1gucuw,2084 
pytz/zoneinfo/Europe/Tiraspol,sha256=p1J_rqFE13pL8cpBRrEFe-teCI8f0fKK4uTUy_4diF4,2390 pytz/zoneinfo/Europe/Ulyanovsk,sha256=nFsgcVTmTiiFzHtyJDRnO-3H4GRAfAeceb6b2jFHLUQ,1267 pytz/zoneinfo/Europe/Uzhgorod,sha256=TIG1rC4QR7nz-vO1VtmN9mDMVjKPDKi7mEB9KpfJOBA,2050 pytz/zoneinfo/Europe/Vaduz,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 pytz/zoneinfo/Europe/Vatican,sha256=-X5F_d3Dz0kBRWiUTXUN-fgeCHbUEHLaaHIwEPZEdUQ,2641 pytz/zoneinfo/Europe/Vienna,sha256=ZmI3kADE6bnrJEccqh73XXBY36L1G4DkpiTQImtNrUk,2200 pytz/zoneinfo/Europe/Vilnius,sha256=UFzRX3orCTB8d9IzlxJPy5eUA2oBPuCu1UJl-2D7C3U,2162 pytz/zoneinfo/Europe/Volgograd,sha256=f93XuSISl0e0ULHzMnywgQk1NA5cnb4Il-XlYIpamy4,1165 pytz/zoneinfo/Europe/Warsaw,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 pytz/zoneinfo/Europe/Zagreb,sha256=OpWtsGFWBE_S-mYoQcAmjCta9HwbGQANnSmVY9OHCTo,1920 pytz/zoneinfo/Europe/Zaporozhye,sha256=V0dhGl3gET8OftMezf8CVy-W00Z7FtuEev5TjI2Rnyw,2106 pytz/zoneinfo/Europe/Zurich,sha256=K5QY7Ujj2VUchKR4bhhb0hgdAJhmwED71ykXDQOGKe8,1909 pytz/zoneinfo/Factory,sha256=aFFlKx93HXoJoF4SSuTlD8cZtJA-ne5oKzAa6eX2V4k,116 pytz/zoneinfo/GB,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/GB-Eire,sha256=xp08wV44TZMmAdBqppttDChQAb8tRN03GcEht99RYtY,3648 pytz/zoneinfo/GMT,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/GMT+0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/GMT-0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/GMT0,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/Greenwich,sha256=bZ83iIPAefhsA4elVHqSxEmGnYBuB94QCEqwTwJJAY0,114 pytz/zoneinfo/HST,sha256=1YkCncvgL9Z5CmUo4Vk8VbQmgA7ZAQ0PtE37j1yOli8,115 pytz/zoneinfo/Hongkong,sha256=UcnFEc9S8hMWl9giVXni4TAhLPWX0H12XvwSt4AJHew,1203 pytz/zoneinfo/Iceland,sha256=mSkaRBGZLeUrm88EeHcaWnEd35Wn-Ag2G10HtI3G2fg,1162 pytz/zoneinfo/Indian/Antananarivo,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Indian/Chagos,sha256=23B26pwwK0gxW7TP76GltyY-RU_o6RGGSrF93pF7S1E,199 pytz/zoneinfo/Indian/Christmas,sha256=J4I0WDX_LYAJxsx2vU0EdxFJQKRE-rRL1UvNQv09pCs,165 pytz/zoneinfo/Indian/Cocos,sha256=PX-k8JpghajjvhljtBjWozaiu9NhUSpVeoACy2cAxN8,174 pytz/zoneinfo/Indian/Comoro,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Indian/Kerguelen,sha256=oIvd6bmQFMLUefoBn4c1fQTOAawGcrPcmge2jU7BsYo,165 pytz/zoneinfo/Indian/Mahe,sha256=fFZ8A-WddgCX1zpcNg3qiGYNeKov8azY57WrPT_d8nM,165 pytz/zoneinfo/Indian/Maldives,sha256=dUQBbrmoB3odWsMt3K1YUnB447A6nkW3aR1aHzdLF7M,199 pytz/zoneinfo/Indian/Mauritius,sha256=k6vWUVcfU3gS1K12e_aMw6BeSdMvdLyCJRCAL7CD0go,241 pytz/zoneinfo/Indian/Mayotte,sha256=Uum8ISzpRaDh830iNkfRva-Rn6NTuuGHNWjig5C29Zo,251 pytz/zoneinfo/Indian/Reunion,sha256=lHnSVh7CYCuDBEM4dYsWDk006BSAznkCPxjiTtL_WiI,165 pytz/zoneinfo/Iran,sha256=ATT50Q0hK6uSba5_WnOE3Px0OWxIwxaqK5Oi10P2A-M,2582 pytz/zoneinfo/Israel,sha256=xpEJ_vI7aMV0iFD5BN9sq71vlYUZj5Q613TUUwZl1Ww,2288 pytz/zoneinfo/Jamaica,sha256=wlagieUPRf5-beie-h7QsONbNzjGsm8vMs8uf28pw28,482 pytz/zoneinfo/Japan,sha256=oCueZgRNxcNcX3ZGdif9y6Su4cyVhga4XHdwlcrYLOs,309 pytz/zoneinfo/Kwajalein,sha256=L4nH3qxv5EBKVRxYt67b9IfZfBzg5KJk19iu7x3oBMk,316 pytz/zoneinfo/Libya,sha256=W1dptGD70T7ppGoo0fczFQeDiIp0nultLNPV66MwB2c,625 pytz/zoneinfo/MET,sha256=i3CKSuP4N_PAj7o-Cbk8zPEdFs0CWWBCAfg2JXDx5V8,2094 pytz/zoneinfo/MST,sha256=6IQwvtT12Bz1pTiqFuoVxNY-4ViS7ZrYHo5nPWwzKPw,114 pytz/zoneinfo/MST7MDT,sha256=910Ek32FKoSyZWY_H19VHaVvqb-JsvnWTOOHvhrKsE0,2310 
pytz/zoneinfo/Mexico/BajaNorte,sha256=OHHtvy3J70z6wvKBHgPqMEnGs6SXp8fkf0WX9ZiOODk,2342 pytz/zoneinfo/Mexico/BajaSur,sha256=aIyre-8trAXSHtqxbuu6gDDkWCUjI_SdAKPIjz74M2E,1526 pytz/zoneinfo/Mexico/General,sha256=DSpTe5TT0KBsxGx79Rs7ah-zJpiGOJKwPjztovRN0b4,1584 pytz/zoneinfo/NZ,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 pytz/zoneinfo/NZ-CHAT,sha256=lkVqaSF1WWpv_B2K-k2uJp2setRVK6XbjsQ38gDGVEg,2068 pytz/zoneinfo/Navajo,sha256=6_yPo1_mvnt9DgpPzr0QdHsjdsfUG6ALnagQLML1DSM,2444 pytz/zoneinfo/PRC,sha256=ZP_C5DqUQ1oEPAQNHTr36S0DGtx453N68YYbqk7u8-Y,561 pytz/zoneinfo/PST8PDT,sha256=Q7TCLkE69a6g7mPoPAkqhg-0dStyiAC0jVlM72KG_R8,2310 pytz/zoneinfo/Pacific/Apia,sha256=p1vFsjfezDCHmPOnmgG47q7wTPM5feosoWN3ucgGnrw,1097 pytz/zoneinfo/Pacific/Auckland,sha256=gADjoyPo_QISQU6UJrAgcHp3HDaMoOFRdH-d23uBSyc,2437 pytz/zoneinfo/Pacific/Bougainville,sha256=ZKDa_S_2gSlmOWizV1DqxH3wbE58rfK1vKZHZqrrtjI,268 pytz/zoneinfo/Pacific/Chatham,sha256=lkVqaSF1WWpv_B2K-k2uJp2setRVK6XbjsQ38gDGVEg,2068 pytz/zoneinfo/Pacific/Chuuk,sha256=6IYDKViuRDC_RVx1AJOxazVET6cZtdv_LFE6xbtGItI,269 pytz/zoneinfo/Pacific/Easter,sha256=paHp1QRXIa02kgd0-4V6vWXdqcwheow-hJQD9VqacfQ,2233 pytz/zoneinfo/Pacific/Efate,sha256=qMpQfM1DMNCg67In4d2-qmMLDANPbHTypP86XOtINuE,466 pytz/zoneinfo/Pacific/Enderbury,sha256=zqW7qAC_6FTcgrGEMhpIsl1oV9I46gY2nH3pwadll68,234 pytz/zoneinfo/Pacific/Fakaofo,sha256=gow-SgE5r5c8J_Ag5nvJ5SUPDg6yH8pth_a-QLDcPv8,200 pytz/zoneinfo/Pacific/Fiji,sha256=zIiVOEMXBs3GcPwqTiaG2QL0egPB8nltIU7ZyoqfaSk,1077 pytz/zoneinfo/Pacific/Funafuti,sha256=P-XYwlWQpWvS3Q_TYFe37BrgxKJy5tg7PHEQNCDGv5U,166 pytz/zoneinfo/Pacific/Galapagos,sha256=MdtlC-ffp8reICzDxsQ8tWMsTkq5ZcN-j3OyyhjokV8,238 pytz/zoneinfo/Pacific/Gambier,sha256=z6eYF8sszLjkfpqmWnbBBAUB-ibaR5nodKaAYbvXOe0,164 pytz/zoneinfo/Pacific/Guadalcanal,sha256=6GX-XpxcCyA64qUMdxJMFMq4sPk0ZjhexqGbryzfgjE,166 pytz/zoneinfo/Pacific/Guam,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 pytz/zoneinfo/Pacific/Honolulu,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 pytz/zoneinfo/Pacific/Johnston,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 pytz/zoneinfo/Pacific/Kiritimati,sha256=VHR3iuwiv3tx65WtitVHCoQEg3VJd812VZ5djuSyUxc,238 pytz/zoneinfo/Pacific/Kosrae,sha256=Vm5AKI6NvuYSz58s8922WNIiWoqPcix2JOJOix1mlSU,351 pytz/zoneinfo/Pacific/Kwajalein,sha256=L4nH3qxv5EBKVRxYt67b9IfZfBzg5KJk19iu7x3oBMk,316 pytz/zoneinfo/Pacific/Majuro,sha256=Dwqh7gXoz7Duwu1n7XF8yEjhM4ULEs42LSQyy7F-qzQ,310 pytz/zoneinfo/Pacific/Marquesas,sha256=uzsjVolutGRXp_FRnvXoU0ApDEb4ZaYoz_r60D7jufg,173 pytz/zoneinfo/Pacific/Midway,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 pytz/zoneinfo/Pacific/Nauru,sha256=oGxocYsqssZ_EeQHf3cUP5cg0qtqzx1BzoEjVWjE_7g,252 pytz/zoneinfo/Pacific/Niue,sha256=lSsVlJJ458vNuIgjZESQyatsJV3LpWGyHqbYXMXPjZ4,241 pytz/zoneinfo/Pacific/Norfolk,sha256=CdEXM9SKYC9Wn7aMxD2sV5i8zE88NQo25Z_L874JthI,880 pytz/zoneinfo/Pacific/Noumea,sha256=FSanpAOCE7WHQeiop4QErKV9ZC3Tzu2GxkH8-tIXsHY,304 pytz/zoneinfo/Pacific/Pago_Pago,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 pytz/zoneinfo/Pacific/Palau,sha256=CRW__McXPlOaxo2S9kHMHaBdjv7u59ZWEwYuJConzmQ,180 pytz/zoneinfo/Pacific/Pitcairn,sha256=O65Ed1FOCF_0rEjpYPAquDwtAF3hxyJNiujgpgZV0kc,202 pytz/zoneinfo/Pacific/Pohnpei,sha256=YqXrKwjhUnxWyV6PFg1L6_zu84MfPW82dypf0S7pHtQ,303 pytz/zoneinfo/Pacific/Ponape,sha256=YqXrKwjhUnxWyV6PFg1L6_zu84MfPW82dypf0S7pHtQ,303 pytz/zoneinfo/Pacific/Port_Moresby,sha256=ei_XjmiRDLh-RU94uvz9CCIIRFH1r0X7WL-sB-6DF60,186 pytz/zoneinfo/Pacific/Rarotonga,sha256=UfUhlaG0u7yOlzoKnHE9pRiHqQ2N_M9n5WHaCCwtbV4,577 
pytz/zoneinfo/Pacific/Saipan,sha256=Ex9znmf6rNfGze6gNpZJCMr1TT4rkl2SnrhecrdJufI,494 pytz/zoneinfo/Pacific/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 pytz/zoneinfo/Pacific/Tahiti,sha256=9iozXRFYDhBOLijmDk2mRS4Mb-LXWW1u7n790jBNKxM,165 pytz/zoneinfo/Pacific/Tarawa,sha256=vT6UxW7KeGptdh80Fj9ASATGmLx8Wai630lML4mwg80,166 pytz/zoneinfo/Pacific/Tongatapu,sha256=ht8ZhdveQXJqsxYtSEcqmRTzXA3OtqYoi4WVBvOPGhw,372 pytz/zoneinfo/Pacific/Truk,sha256=6IYDKViuRDC_RVx1AJOxazVET6cZtdv_LFE6xbtGItI,269 pytz/zoneinfo/Pacific/Wake,sha256=dTJxldgcad-kGrODwo4cAHGRSsS-K3fjeZ62WEUhmFk,166 pytz/zoneinfo/Pacific/Wallis,sha256=CAlw1H5gkER5lkvtmHY-ppoGL3hNmYxfMaXQpI0fTOE,166 pytz/zoneinfo/Pacific/Yap,sha256=6IYDKViuRDC_RVx1AJOxazVET6cZtdv_LFE6xbtGItI,269 pytz/zoneinfo/Poland,sha256=TiLDPbeVF0ckgLVEkaSeDaKZ8wctdJDOl_HE_Wd5rKs,2654 pytz/zoneinfo/Portugal,sha256=L6n3snx6pNHHJIL6JOLFOAlYkQ2J5uB_y5MG_Ic_PDU,3469 pytz/zoneinfo/ROC,sha256=DMmQwOpPql25ue3Nf8vAKKT4em06D1Z9rHbLIitxixk,761 pytz/zoneinfo/ROK,sha256=LI9LsV3XcJC0l-KoQf8zI-y7rk-du57erS-N2Ptdi7Q,617 pytz/zoneinfo/Singapore,sha256=hIgr_LHMTWh3GgeG-MmLHBp-9anUxQcfMlKFtX8WvmU,383 pytz/zoneinfo/Turkey,sha256=2S0A_f7VxvyErJMMCPqK33AChA29IVkMr1o-SpMtMxk,1947 pytz/zoneinfo/UCT,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/US/Alaska,sha256=oZA1NSPS2BWdymYpnCHFO8BlYVS-ll5KLg2Ez9CbETs,2371 pytz/zoneinfo/US/Aleutian,sha256=IB1DhwJQAKbhPJ9jHLf8zW5Dad7HIkBS-dhv64E1OlM,2356 pytz/zoneinfo/US/Arizona,sha256=nEOwYOnGxENw9zW8m50PGxbtVfTrX3QYAo4x4LgOLfI,328 pytz/zoneinfo/US/Central,sha256=4aZFw-svkMyXmSpNufqzK-xveos-oVJDpEyI8Yu9HQE,3576 pytz/zoneinfo/US/East-Indiana,sha256=GrNub1_3Um5Qh67wOx58_TEAz4fwAeAlk2AlMTVA_sI,1666 pytz/zoneinfo/US/Eastern,sha256=7AoiEGjr3wV4P7C4Qs35COZqwr2mjNDq7ocpsSPFOM8,3536 pytz/zoneinfo/US/Hawaii,sha256=fwPRv1Jk56sCOi75uZfd_Iy2k2aSQHx3B2K5xUlSPzM,329 pytz/zoneinfo/US/Indiana-Starke,sha256=BiALShjiOLg1o8mMRWJ1jyTlJkgvwzte7B9WSOvTUNg,2428 pytz/zoneinfo/US/Michigan,sha256=hecz8yqY2Cj5B61G3gLZdAVZvRgK9l0P90c_gN-uD5g,2230 pytz/zoneinfo/US/Mountain,sha256=6_yPo1_mvnt9DgpPzr0QdHsjdsfUG6ALnagQLML1DSM,2444 pytz/zoneinfo/US/Pacific,sha256=VOy1PikdjiVdJ7lukVGzwl8uDxV_KYqznkTm5BLEiDM,2836 pytz/zoneinfo/US/Samoa,sha256=fCYrYphYY6rUfxOw712y5cyRe104AC3pouqD3bCINFg,175 pytz/zoneinfo/UTC,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/Universal,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/W-SU,sha256=KmkofRcj6T8Ph28PJChm8JVp13uRvef6TZ0GuPzUiDw,1535 pytz/zoneinfo/WET,sha256=Sc0l03EfVs_aIi17I4KyZJFkwiAHat5BgpjuuFDhgQ0,1905 pytz/zoneinfo/Zulu,sha256=i4WEZ5GrLIpUY8g6W-PAQ-JXDXRIQ01BOYlp7Ufj5vI,114 pytz/zoneinfo/iso3166.tab,sha256=BMh_yY7MXp8DMEy71jarFX3IJSNpwuEyIjIo2HKUXD4,4463 pytz/zoneinfo/leapseconds,sha256=xtVmFiPu6EYQCrewszUtc_e_IlL4dyeRZiMxoovAuZ8,3142 pytz/zoneinfo/posixrules,sha256=7AoiEGjr3wV4P7C4Qs35COZqwr2mjNDq7ocpsSPFOM8,3536 pytz/zoneinfo/tzdata.zi,sha256=Shu7fMhx2gJNwhJOMQrD-nbJm2SWVaQrxyTZ2OvbIv8,111686 pytz/zoneinfo/zone.tab,sha256=blJzl-61Ld-VdlpCeQQUdY7e7Q_uYO0BOTEUo6oxQzw,19397 pytz/zoneinfo/zone1970.tab,sha256=0jrybOT-i4x72kZZEJVqhQ3e3gfFRm0bdGzcxSSUZts,17911
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/metadata.json
{"classifiers": ["Development Status :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.4", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules"], "download_url": "https://pypi.org/project/pytz/", "extensions": {"python.details": {"contacts": [{"email": "stuart@stuartbishop.net", "name": "Stuart Bishop", "role": "author"}, {"email": "stuart@stuartbishop.net", "name": "Stuart Bishop", "role": "maintainer"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://pythonhosted.org/pytz"}}}, "generator": "bdist_wheel (0.30.0)", "keywords": ["timezone", "tzinfo", "datetime", "olson", "time"], "license": "MIT", "metadata_version": "2.0", "name": "pytz", "platform": "Independent", "summary": "World timezone definitions, modern and historical", "version": "2020.1"}
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/DESCRIPTION.rst
pytz - World Timezone Definitions for Python
============================================

:Author: Stuart Bishop <stuart@stuartbishop.net>

Introduction
~~~~~~~~~~~~

pytz brings the Olson tz database into Python. This library allows accurate and cross-platform timezone calculations using Python 2.4 or higher. It also solves the issue of ambiguous times at the end of daylight saving time, which you can read more about in the Python Library Reference (``datetime.tzinfo``).

Almost all of the Olson timezones are supported.

.. note::

    This library differs from the documented Python API for
    tzinfo implementations; if you want to create local wallclock
    times you need to use the ``localize()`` method documented in this
    document. In addition, if you perform date arithmetic on local
    times that cross DST boundaries, the result may be in an incorrect
    timezone (i.e. subtract 1 minute from 2002-10-27 1:00 EST and you get
    2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A
    ``normalize()`` method is provided to correct this. Unfortunately
    these issues cannot be resolved without modifying the Python
    datetime implementation (see PEP-431).

Installation
~~~~~~~~~~~~

This package can either be installed using ``pip`` or from a tarball using the standard Python distutils.

If you are installing using ``pip``, you don't need to download anything as the latest version will be downloaded for you from PyPI::

    pip install pytz

If you are installing from a tarball, run the following command as an administrative user::

    python setup.py install

pytz for Enterprise
~~~~~~~~~~~~~~~~~~~

Available as part of the Tidelift Subscription.

The maintainers of pytz and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. <https://tidelift.com/subscription/pkg/pypi-pytz?utm_source=pypi-pytz&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_.

Example & Usage
~~~~~~~~~~~~~~~

Localized times and date arithmetic
-----------------------------------

>>> from datetime import datetime, timedelta
>>> from pytz import timezone
>>> import pytz
>>> utc = pytz.utc
>>> utc.zone
'UTC'
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> amsterdam = timezone('Europe/Amsterdam')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z'

This library only supports two ways of building a localized time. The first is to use the ``localize()`` method provided by the pytz library. This is used to localize a naive datetime (datetime with no timezone information):

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0))
>>> print(loc_dt.strftime(fmt))
2002-10-27 06:00:00 EST-0500

The second way of building a localized time is by converting an existing localized time using the standard ``astimezone()`` method:

>>> ams_dt = loc_dt.astimezone(amsterdam)
>>> ams_dt.strftime(fmt)
'2002-10-27 12:00:00 CET+0100'

Unfortunately using the tzinfo argument of the standard datetime constructors ``does not work`` with pytz for many timezones.

>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) # /!\ Does not work this way!
'2002-10-27 12:00:00 LMT+0020'

It is safe for timezones without daylight saving transitions though, such as UTC:

>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) # /!\ Not recommended except for UTC
'2002-10-27 12:00:00 UTC+0000'

The preferred way of dealing with times is to always work in UTC, converting to localtime only when generating output to be read by humans.

>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST-0500'

This library also allows you to do date arithmetic using local times, although it is more complicated than working in UTC as you need to use the ``normalize()`` method to handle daylight saving time and other timezone transitions. In this example, ``loc_dt`` is set to the instant when daylight saving time ends in the US/Eastern timezone.

>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST-0500'
>>> eastern.normalize(before).strftime(fmt)
'2002-10-27 01:50:00 EDT-0400'
>>> after = eastern.normalize(before + timedelta(minutes=20))
>>> after.strftime(fmt)
'2002-10-27 01:10:00 EST-0500'

Creating local times is also tricky, which is why working with local times is not recommended. Unfortunately, you cannot just pass a ``tzinfo`` argument when constructing a datetime (see the next section for more details):

>>> dt = datetime(2002, 10, 27, 1, 30, 0)
>>> dt1 = eastern.localize(dt, is_dst=True)
>>> dt1.strftime(fmt)
'2002-10-27 01:30:00 EDT-0400'
>>> dt2 = eastern.localize(dt, is_dst=False)
>>> dt2.strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

Converting between timezones is easier, using the standard ``astimezone`` method.

>>> utc_dt = utc.localize(datetime.utcfromtimestamp(1143408899))
>>> utc_dt.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> au_tz = timezone('Australia/Sydney')
>>> au_dt = utc_dt.astimezone(au_tz)
>>> au_dt.strftime(fmt)
'2006-03-27 08:34:59 AEDT+1100'
>>> utc_dt2 = au_dt.astimezone(utc)
>>> utc_dt2.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> utc_dt == utc_dt2
True

You can take shortcuts when dealing with the UTC side of timezone conversions. ``normalize()`` and ``localize()`` are not really necessary when there are no daylight saving time transitions to deal with.

>>> utc_dt = datetime.utcfromtimestamp(1143408899).replace(tzinfo=utc)
>>> utc_dt.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> au_tz = timezone('Australia/Sydney')
>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz))
>>> au_dt.strftime(fmt)
'2006-03-27 08:34:59 AEDT+1100'
>>> utc_dt2 = au_dt.astimezone(utc)
>>> utc_dt2.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'

``tzinfo`` API
--------------

The ``tzinfo`` instances returned by the ``timezone()`` function have been extended to cope with ambiguous times by adding an ``is_dst`` parameter to the ``utcoffset()``, ``dst()`` and ``tzname()`` methods.

>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)

The ``is_dst`` parameter is ignored for most timestamps. It is only used during DST transition ambiguous periods to resolve that ambiguity.
>>> print(tz.utcoffset(normal, is_dst=True))
-1 day, 21:30:00
>>> print(tz.dst(normal, is_dst=True))
1:00:00
>>> tz.tzname(normal, is_dst=True)
'NDT'

>>> print(tz.utcoffset(ambiguous, is_dst=True))
-1 day, 21:30:00
>>> print(tz.dst(ambiguous, is_dst=True))
1:00:00
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'

>>> print(tz.utcoffset(normal, is_dst=False))
-1 day, 21:30:00
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.tzname(normal, is_dst=False)
'NDT'

>>> print(tz.utcoffset(ambiguous, is_dst=False))
-1 day, 20:30:00
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'

If ``is_dst`` is not specified, ambiguous timestamps will raise a ``pytz.exceptions.AmbiguousTimeError`` exception.

>>> print(tz.utcoffset(normal))
-1 day, 21:30:00
>>> print(tz.dst(normal))
1:00:00
>>> tz.tzname(normal)
'NDT'

>>> import pytz.exceptions
>>> try:
...     tz.utcoffset(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
>>> try:
...     tz.dst(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
>>> try:
...     tz.tzname(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00

Problems with Localtime
~~~~~~~~~~~~~~~~~~~~~~~

The major problem we have to deal with is that certain datetimes may occur twice in a year. For example, in the US/Eastern timezone on the last Sunday morning in October, the following sequence happens:

    - 01:00 EDT occurs
    - 1 hour later, instead of 2:00am the clock is turned back 1 hour
      and 01:00 happens again (this time 01:00 EST)

In fact, every instant between 01:00 and 02:00 occurs twice. This means that if you try to create a time in the 'US/Eastern' timezone using the standard datetime syntax, there is no way to specify whether you meant before or after the end-of-daylight-saving-time transition.

Using the pytz custom syntax, the best you can do is make an educated guess:

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30, 00))
>>> loc_dt.strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

As you can see, the system has chosen one for you and there is a 50% chance of it being out by one hour. For some applications, this does not matter. However, if you are trying to schedule meetings with people in different timezones or analyze log files it is not acceptable. The best and simplest solution is to stick with using UTC.
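For example, arithmetic that crosses the end-of-DST transition stays correct if you do it in UTC and only convert back to the local zone for display. A minimal sketch, assuming the ``eastern``, ``fmt`` and ``timedelta`` names from the earlier examples are still in scope:

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 0, 30, 0))
>>> utc_dt = loc_dt.astimezone(pytz.utc)  # do the arithmetic in UTC
>>> (utc_dt + timedelta(hours=1)).astimezone(eastern).strftime(fmt)
'2002-10-27 01:30:00 EDT-0400'
>>> (utc_dt + timedelta(hours=2)).astimezone(eastern).strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

Both results display the ambiguous wall-clock time 01:30, but because the arithmetic happened in UTC they remain exactly one hour apart, which is what naive local-time arithmetic gets wrong.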
The definition of UTC is distinct from these other timezones, and they are not equivalent. For this reason, they will not compare the same in Python. >>> utc == pytz.timezone('GMT') False See the section `What is UTC`_, below. If you insist on working with local times, this library provides a facility for constructing them unambiguously: >>> loc_dt = datetime(2002, 10, 27, 1, 30, 00) >>> est_dt = eastern.localize(loc_dt, is_dst=True) >>> edt_dt = eastern.localize(loc_dt, is_dst=False) >>> print(est_dt.strftime(fmt) + ' / ' + edt_dt.strftime(fmt)) 2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500 If you pass None as the is_dst flag to localize(), pytz will refuse to guess and raise exceptions if you try to build ambiguous or non-existent times. For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern timezone when the clocks where put back at the end of Daylight Saving Time: >>> dt = datetime(2002, 10, 27, 1, 30, 00) >>> try: ... eastern.localize(dt, is_dst=None) ... except pytz.exceptions.AmbiguousTimeError: ... print('pytz.exceptions.AmbiguousTimeError: %s' % dt) pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00 Similarly, 2:30am on 7th April 2002 never happened at all in the US/Eastern timezone, as the clocks where put forward at 2:00am skipping the entire hour: >>> dt = datetime(2002, 4, 7, 2, 30, 00) >>> try: ... eastern.localize(dt, is_dst=None) ... except pytz.exceptions.NonExistentTimeError: ... print('pytz.exceptions.NonExistentTimeError: %s' % dt) pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00 Both of these exceptions share a common base class to make error handling easier: >>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError) True >>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError) True A special case is where countries change their timezone definitions with no daylight savings time switch. For example, in 1915 Warsaw switched from Warsaw time to Central European time with no daylight savings transition. So at the stroke of midnight on August 5th 1915 the clocks were wound back 24 minutes creating an ambiguous time period that cannot be specified without referring to the timezone abbreviation or the actual UTC offset. In this case midnight happened twice, neither time during a daylight saving time period. pytz handles this transition by treating the ambiguous period before the switch as daylight savings time, and the ambiguous period after as standard time. 
>>> warsaw = pytz.timezone('Europe/Warsaw')
>>> amb_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=True)
>>> amb_dt1.strftime(fmt)
'1915-08-04 23:59:59 WMT+0124'
>>> amb_dt2 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False)
>>> amb_dt2.strftime(fmt)
'1915-08-04 23:59:59 CET+0100'
>>> switch_dt = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False)
>>> switch_dt.strftime(fmt)
'1915-08-05 00:00:00 CET+0100'
>>> str(switch_dt - amb_dt1)
'0:24:01'
>>> str(switch_dt - amb_dt2)
'0:00:01'

The best way of creating a time during an ambiguous time period is by converting from another timezone such as UTC:

>>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc)
>>> utc_dt.astimezone(warsaw).strftime(fmt)
'1915-08-04 23:36:00 CET+0100'

The standard Python way of handling all these ambiguities is not to handle them, as demonstrated in this example using the US/Eastern timezone definition from the Python documentation (note that this implementation only works for dates between 1987 and 2006 - it is included for tests only!):

>>> from pytz.reference import Eastern # pytz.reference only for tests
>>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern)
>>> str(dt)
'2002-10-27 00:30:00-04:00'
>>> str(dt + timedelta(hours=1))
'2002-10-27 01:30:00-05:00'
>>> str(dt + timedelta(hours=2))
'2002-10-27 02:30:00-05:00'
>>> str(dt + timedelta(hours=3))
'2002-10-27 03:30:00-05:00'

Notice the first two results? At first glance you might think they are correct, but taking the UTC offset into account you find that they are actually two hours apart instead of the one hour we asked for.

>>> from pytz.reference import UTC # pytz.reference only for tests
>>> str(dt.astimezone(UTC))
'2002-10-27 04:30:00+00:00'
>>> str((dt + timedelta(hours=1)).astimezone(UTC))
'2002-10-27 06:30:00+00:00'

Country Information
~~~~~~~~~~~~~~~~~~~

A mechanism is provided to access the timezones commonly in use for a particular country, looked up using the ISO 3166 country code. It returns a list of strings that can be used to retrieve the relevant tzinfo instance using ``pytz.timezone()``:

>>> print(' '.join(pytz.country_timezones['nz']))
Pacific/Auckland Pacific/Chatham

The Olson database comes with an ISO 3166 country code to English country name mapping that pytz exposes as a dictionary:

>>> print(pytz.country_names['nz'])
New Zealand

What is UTC
~~~~~~~~~~~

'UTC' is `Coordinated Universal Time`_. It is a successor to, but distinct from, Greenwich Mean Time (GMT) and the various definitions of Universal Time. UTC is now the worldwide standard for regulating clocks and time measurement.

All other timezones are defined relative to UTC, and include offsets like UTC+0800 - hours to add or subtract from UTC to derive the local time. No daylight saving time occurs in UTC, making it a useful timezone to perform date arithmetic without worrying about the confusion and ambiguities caused by daylight saving time transitions, your country changing its timezone, or mobile computers that roam through multiple timezones.

.. _Coordinated Universal Time: https://en.wikipedia.org/wiki/Coordinated_Universal_Time

Helpers
~~~~~~~

There are two lists of timezones provided.

``all_timezones`` is the exhaustive list of the timezone names that can be used.

>>> from pytz import all_timezones
>>> len(all_timezones) >= 500
True
>>> 'Etc/Greenwich' in all_timezones
True

``common_timezones`` is a list of useful, current timezones.
It doesn't contain deprecated zones or historical zones, except for a few I've deemed in common usage, such as US/Eastern (open a bug report if you think other timezones are deserving of being included here). It is also a sequence of strings.

>>> from pytz import common_timezones
>>> len(common_timezones) < len(all_timezones)
True
>>> 'Etc/Greenwich' in common_timezones
False
>>> 'Australia/Melbourne' in common_timezones
True
>>> 'US/Eastern' in common_timezones
True
>>> 'Canada/Eastern' in common_timezones
True
>>> 'Australia/Yancowinna' in all_timezones
True
>>> 'Australia/Yancowinna' in common_timezones
False

Both ``common_timezones`` and ``all_timezones`` are alphabetically sorted:

>>> common_timezones_dupe = common_timezones[:]
>>> common_timezones_dupe.sort()
>>> common_timezones == common_timezones_dupe
True
>>> all_timezones_dupe = all_timezones[:]
>>> all_timezones_dupe.sort()
>>> all_timezones == all_timezones_dupe
True

``all_timezones`` and ``common_timezones`` are also available as sets.

>>> from pytz import all_timezones_set, common_timezones_set
>>> 'US/Eastern' in all_timezones_set
True
>>> 'US/Eastern' in common_timezones_set
True
>>> 'Australia/Victoria' in common_timezones_set
False

You can also retrieve lists of timezones used by particular countries using the ``country_timezones()`` function. It requires an ISO-3166 two-letter country code.

>>> from pytz import country_timezones
>>> print(' '.join(country_timezones('ch')))
Europe/Zurich
>>> print(' '.join(country_timezones('CH')))
Europe/Zurich

Internationalization - i18n/l10n
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Pytz is an interface to the IANA database, which uses ASCII names. The `Unicode Consortium's Unicode Locales (CLDR) <http://cldr.unicode.org>`_ project provides translations. Thomas Khyn's `l18n <https://pypi.org/project/l18n/>`_ package can be used to access these translations from Python.

License
~~~~~~~

MIT license.

This code is also available as part of Zope 3 under the Zope Public License, Version 2.1 (ZPL).

I'm happy to relicense this code if necessary for inclusion in other open source projects.

Latest Versions
~~~~~~~~~~~~~~~

This package will be updated after releases of the Olson timezone database. The latest version can be downloaded from the `Python Package Index <https://pypi.org/project/pytz/>`_. The code that is used to generate this distribution is hosted on launchpad.net and available using git::

    git clone https://git.launchpad.net/pytz

A mirror on GitHub is also available at https://github.com/stub42/pytz

Announcements of new releases are made on `Launchpad <https://launchpad.net/pytz>`_, and the `Atom feed <http://feeds.launchpad.net/pytz/announcements.atom>`_ hosted there.

Bugs, Feature Requests & Patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Bugs can be reported using `Launchpad Bugs <https://bugs.launchpad.net/pytz>`_.

Security Issues
~~~~~~~~~~~~~~~

Reports about security issues can be made via `Tidelift <https://tidelift.com/security>`_.

Issues & Limitations
~~~~~~~~~~~~~~~~~~~~

- Offsets from UTC are rounded to the nearest whole minute, so timezones such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This is a limitation of the Python datetime library.

- If you think a timezone definition is incorrect, I probably can't fix it. pytz is a direct translation of the Olson timezone database, and changes to the timezone definitions need to be made to this source. If you find errors they should be reported to the time zone mailing list, linked from http://www.iana.org/time-zones.
Further Reading
~~~~~~~~~~~~~~~

More info than you want to know about timezones:
http://www.twinsun.com/tz/tz-link.htm

Contact
~~~~~~~

Stuart Bishop <stuart@stuartbishop.net>
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/top_level.txt
pytz
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/LICENSE.txt
Copyright (c) 2003-2019 Stuart Bishop <stuart@stuartbishop.net>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pytz-2020.1.dist-info/METADATA
Metadata-Version: 2.0
Name: pytz
Version: 2020.1
Summary: World timezone definitions, modern and historical
Home-page: http://pythonhosted.org/pytz
Author: Stuart Bishop
Author-email: stuart@stuartbishop.net
Maintainer: Stuart Bishop
Maintainer-email: stuart@stuartbishop.net
License: MIT
Download-URL: https://pypi.org/project/pytz/
Keywords: timezone,tzinfo,datetime,olson,time
Platform: Independent
Classifier: Development Status :: 6 - Mature
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.0
Classifier: Programming Language :: Python :: 3.1
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Software Development :: Libraries :: Python Modules

pytz - World Timezone Definitions for Python
============================================

:Author: Stuart Bishop <stuart@stuartbishop.net>

Introduction
~~~~~~~~~~~~

pytz brings the Olson tz database into Python. This library allows accurate and cross-platform timezone calculations using Python 2.4 or higher. It also solves the issue of ambiguous times at the end of daylight saving time, which you can read more about in the Python Library Reference (``datetime.tzinfo``).

Almost all of the Olson timezones are supported.

.. note::

    This library differs from the documented Python API for
    tzinfo implementations; if you want to create local wallclock
    times you need to use the ``localize()`` method documented in this
    document. In addition, if you perform date arithmetic on local
    times that cross DST boundaries, the result may be in an incorrect
    timezone (i.e. subtract 1 minute from 2002-10-27 1:00 EST and you get
    2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A
    ``normalize()`` method is provided to correct this. Unfortunately
    these issues cannot be resolved without modifying the Python
    datetime implementation (see PEP-431).

Installation
~~~~~~~~~~~~

This package can either be installed using ``pip`` or from a tarball using the standard Python distutils.

If you are installing using ``pip``, you don't need to download anything as the latest version will be downloaded for you from PyPI::

    pip install pytz

If you are installing from a tarball, run the following command as an administrative user::

    python setup.py install

pytz for Enterprise
~~~~~~~~~~~~~~~~~~~

Available as part of the Tidelift Subscription.

The maintainers of pytz and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. `Learn more. <https://tidelift.com/subscription/pkg/pypi-pytz?utm_source=pypi-pytz&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_.
Example & Usage
~~~~~~~~~~~~~~~

Localized times and date arithmetic
-----------------------------------

>>> from datetime import datetime, timedelta
>>> from pytz import timezone
>>> import pytz
>>> utc = pytz.utc
>>> utc.zone
'UTC'
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> amsterdam = timezone('Europe/Amsterdam')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z'

This library only supports two ways of building a localized time. The first is to use the ``localize()`` method provided by the pytz library. This is used to localize a naive datetime (datetime with no timezone information):

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0))
>>> print(loc_dt.strftime(fmt))
2002-10-27 06:00:00 EST-0500

The second way of building a localized time is by converting an existing localized time using the standard ``astimezone()`` method:

>>> ams_dt = loc_dt.astimezone(amsterdam)
>>> ams_dt.strftime(fmt)
'2002-10-27 12:00:00 CET+0100'

Unfortunately using the tzinfo argument of the standard datetime constructors ``does not work`` with pytz for many timezones.

>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) # /!\ Does not work this way!
'2002-10-27 12:00:00 LMT+0020'

It is safe for timezones without daylight saving transitions though, such as UTC:

>>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) # /!\ Not recommended except for UTC
'2002-10-27 12:00:00 UTC+0000'

The preferred way of dealing with times is to always work in UTC, converting to localtime only when generating output to be read by humans.

>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST-0500'

This library also allows you to do date arithmetic using local times, although it is more complicated than working in UTC as you need to use the ``normalize()`` method to handle daylight saving time and other timezone transitions. In this example, ``loc_dt`` is set to the instant when daylight saving time ends in the US/Eastern timezone.

>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST-0500'
>>> eastern.normalize(before).strftime(fmt)
'2002-10-27 01:50:00 EDT-0400'
>>> after = eastern.normalize(before + timedelta(minutes=20))
>>> after.strftime(fmt)
'2002-10-27 01:10:00 EST-0500'

Creating local times is also tricky, which is why working with local times is not recommended. Unfortunately, you cannot just pass a ``tzinfo`` argument when constructing a datetime (see the next section for more details):

>>> dt = datetime(2002, 10, 27, 1, 30, 0)
>>> dt1 = eastern.localize(dt, is_dst=True)
>>> dt1.strftime(fmt)
'2002-10-27 01:30:00 EDT-0400'
>>> dt2 = eastern.localize(dt, is_dst=False)
>>> dt2.strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

Converting between timezones is easier, using the standard ``astimezone`` method.

>>> utc_dt = utc.localize(datetime.utcfromtimestamp(1143408899))
>>> utc_dt.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> au_tz = timezone('Australia/Sydney')
>>> au_dt = utc_dt.astimezone(au_tz)
>>> au_dt.strftime(fmt)
'2006-03-27 08:34:59 AEDT+1100'
>>> utc_dt2 = au_dt.astimezone(utc)
>>> utc_dt2.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> utc_dt == utc_dt2
True

You can take shortcuts when dealing with the UTC side of timezone conversions. ``normalize()`` and ``localize()`` are not really necessary when there are no daylight saving time transitions to deal with.
>>> utc_dt = datetime.utcfromtimestamp(1143408899).replace(tzinfo=utc)
>>> utc_dt.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'
>>> au_tz = timezone('Australia/Sydney')
>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz))
>>> au_dt.strftime(fmt)
'2006-03-27 08:34:59 AEDT+1100'
>>> utc_dt2 = au_dt.astimezone(utc)
>>> utc_dt2.strftime(fmt)
'2006-03-26 21:34:59 UTC+0000'

``tzinfo`` API
--------------

The ``tzinfo`` instances returned by the ``timezone()`` function have been extended to cope with ambiguous times by adding an ``is_dst`` parameter to the ``utcoffset()``, ``dst()`` and ``tzname()`` methods.

>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)

The ``is_dst`` parameter is ignored for most timestamps. It is only used during DST transition ambiguous periods to resolve that ambiguity.

>>> print(tz.utcoffset(normal, is_dst=True))
-1 day, 21:30:00
>>> print(tz.dst(normal, is_dst=True))
1:00:00
>>> tz.tzname(normal, is_dst=True)
'NDT'

>>> print(tz.utcoffset(ambiguous, is_dst=True))
-1 day, 21:30:00
>>> print(tz.dst(ambiguous, is_dst=True))
1:00:00
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'

>>> print(tz.utcoffset(normal, is_dst=False))
-1 day, 21:30:00
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.tzname(normal, is_dst=False)
'NDT'

>>> print(tz.utcoffset(ambiguous, is_dst=False))
-1 day, 20:30:00
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'

If ``is_dst`` is not specified, ambiguous timestamps will raise a ``pytz.exceptions.AmbiguousTimeError`` exception.

>>> print(tz.utcoffset(normal))
-1 day, 21:30:00
>>> print(tz.dst(normal))
1:00:00
>>> tz.tzname(normal)
'NDT'

>>> import pytz.exceptions
>>> try:
...     tz.utcoffset(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
>>> try:
...     tz.dst(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00
>>> try:
...     tz.tzname(ambiguous)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous)
pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00

Problems with Localtime
~~~~~~~~~~~~~~~~~~~~~~~

The major problem we have to deal with is that certain datetimes may occur twice in a year. For example, in the US/Eastern timezone on the last Sunday morning in October, the following sequence happens:

    - 01:00 EDT occurs
    - 1 hour later, instead of 2:00am the clock is turned back 1 hour
      and 01:00 happens again (this time 01:00 EST)

In fact, every instant between 01:00 and 02:00 occurs twice. This means that if you try to create a time in the 'US/Eastern' timezone using the standard datetime syntax, there is no way to specify whether you meant before or after the end-of-daylight-saving-time transition.

Using the pytz custom syntax, the best you can do is make an educated guess:

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30, 00))
>>> loc_dt.strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

As you can see, the system has chosen one for you and there is a 50% chance of it being out by one hour. For some applications, this does not matter. However, if you are trying to schedule meetings with people in different timezones or analyze log files it is not acceptable. The best and simplest solution is to stick with using UTC.
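For example, arithmetic that crosses the end-of-DST transition stays correct if you do it in UTC and only convert back to the local zone for display. A minimal sketch, assuming the ``eastern``, ``fmt`` and ``timedelta`` names from the earlier examples are still in scope:

>>> loc_dt = eastern.localize(datetime(2002, 10, 27, 0, 30, 0))
>>> utc_dt = loc_dt.astimezone(pytz.utc)  # do the arithmetic in UTC
>>> (utc_dt + timedelta(hours=1)).astimezone(eastern).strftime(fmt)
'2002-10-27 01:30:00 EDT-0400'
>>> (utc_dt + timedelta(hours=2)).astimezone(eastern).strftime(fmt)
'2002-10-27 01:30:00 EST-0500'

Both results display the ambiguous wall-clock time 01:30, but because the arithmetic happened in UTC they remain exactly one hour apart, which is what naive local-time arithmetic gets wrong.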
The pytz package encourages using UTC for internal timezone representation by including a special UTC implementation based on the standard Python reference implementation in the Python documentation.

The UTC timezone unpickles to be the same instance, and pickles to a smaller size than other pytz tzinfo instances. The UTC implementation can be obtained as pytz.utc, pytz.UTC, or pytz.timezone('UTC').

>>> import pickle, pytz
>>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> pytz.utc is pytz.UTC is pytz.timezone('UTC')
True

Note that some other timezones are commonly thought of as the same (GMT, Greenwich, Universal, etc.). The definition of UTC is distinct from these other timezones, and they are not equivalent. For this reason, they will not compare the same in Python.

>>> utc == pytz.timezone('GMT')
False

See the section `What is UTC`_, below.

If you insist on working with local times, this library provides a facility for constructing them unambiguously:

>>> loc_dt = datetime(2002, 10, 27, 1, 30, 00)
>>> est_dt = eastern.localize(loc_dt, is_dst=True)
>>> edt_dt = eastern.localize(loc_dt, is_dst=False)
>>> print(est_dt.strftime(fmt) + ' / ' + edt_dt.strftime(fmt))
2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500

If you pass None as the is_dst flag to localize(), pytz will refuse to guess and raise exceptions if you try to build ambiguous or non-existent times. For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern timezone when the clocks were put back at the end of Daylight Saving Time:

>>> dt = datetime(2002, 10, 27, 1, 30, 00)
>>> try:
...     eastern.localize(dt, is_dst=None)
... except pytz.exceptions.AmbiguousTimeError:
...     print('pytz.exceptions.AmbiguousTimeError: %s' % dt)
pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00

Similarly, 2:30am on 7th April 2002 never happened at all in the US/Eastern timezone, as the clocks were put forward at 2:00am, skipping the entire hour:

>>> dt = datetime(2002, 4, 7, 2, 30, 00)
>>> try:
...     eastern.localize(dt, is_dst=None)
... except pytz.exceptions.NonExistentTimeError:
...     print('pytz.exceptions.NonExistentTimeError: %s' % dt)
pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00

Both of these exceptions share a common base class to make error handling easier:

>>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError)
True
>>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError)
True

A special case is where countries change their timezone definitions with no daylight saving time switch. For example, in 1915 Warsaw switched from Warsaw time to Central European time with no daylight saving transition. So at the stroke of midnight on August 5th 1915 the clocks were wound back 24 minutes, creating an ambiguous time period that cannot be specified without referring to the timezone abbreviation or the actual UTC offset. In this case midnight happened twice, neither time during a daylight saving time period. pytz handles this transition by treating the ambiguous period before the switch as daylight saving time, and the ambiguous period after as standard time.
>>> warsaw = pytz.timezone('Europe/Warsaw')
>>> amb_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=True)
>>> amb_dt1.strftime(fmt)
'1915-08-04 23:59:59 WMT+0124'
>>> amb_dt2 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False)
>>> amb_dt2.strftime(fmt)
'1915-08-04 23:59:59 CET+0100'
>>> switch_dt = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False)
>>> switch_dt.strftime(fmt)
'1915-08-05 00:00:00 CET+0100'
>>> str(switch_dt - amb_dt1)
'0:24:01'
>>> str(switch_dt - amb_dt2)
'0:00:01'

The best way of creating a time during an ambiguous time period is by converting from another timezone such as UTC:

>>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc)
>>> utc_dt.astimezone(warsaw).strftime(fmt)
'1915-08-04 23:36:00 CET+0100'

The standard Python way of handling all these ambiguities is not to handle them, as demonstrated in this example using the US/Eastern timezone definition from the Python documentation (note that this implementation only works for dates between 1987 and 2006 - it is included for tests only!):

>>> from pytz.reference import Eastern # pytz.reference only for tests
>>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern)
>>> str(dt)
'2002-10-27 00:30:00-04:00'
>>> str(dt + timedelta(hours=1))
'2002-10-27 01:30:00-05:00'
>>> str(dt + timedelta(hours=2))
'2002-10-27 02:30:00-05:00'
>>> str(dt + timedelta(hours=3))
'2002-10-27 03:30:00-05:00'

Notice the first two results? At first glance you might think they are correct, but taking the UTC offset into account you find that they are actually two hours apart instead of the one hour we asked for.

>>> from pytz.reference import UTC # pytz.reference only for tests
>>> str(dt.astimezone(UTC))
'2002-10-27 04:30:00+00:00'
>>> str((dt + timedelta(hours=1)).astimezone(UTC))
'2002-10-27 06:30:00+00:00'

Country Information
~~~~~~~~~~~~~~~~~~~

A mechanism is provided to access the timezones commonly in use for a particular country, looked up using the ISO 3166 country code. It returns a list of strings that can be used to retrieve the relevant tzinfo instance using ``pytz.timezone()``:

>>> print(' '.join(pytz.country_timezones['nz']))
Pacific/Auckland Pacific/Chatham

The Olson database comes with an ISO 3166 country code to English country name mapping that pytz exposes as a dictionary:

>>> print(pytz.country_names['nz'])
New Zealand

What is UTC
~~~~~~~~~~~

'UTC' is `Coordinated Universal Time`_. It is a successor to, but distinct from, Greenwich Mean Time (GMT) and the various definitions of Universal Time. UTC is now the worldwide standard for regulating clocks and time measurement.

All other timezones are defined relative to UTC, and include offsets like UTC+0800 - hours to add or subtract from UTC to derive the local time. No daylight saving time occurs in UTC, making it a useful timezone to perform date arithmetic without worrying about the confusion and ambiguities caused by daylight saving time transitions, your country changing its timezone, or mobile computers that roam through multiple timezones.

.. _Coordinated Universal Time: https://en.wikipedia.org/wiki/Coordinated_Universal_Time

Helpers
~~~~~~~

There are two lists of timezones provided.

``all_timezones`` is the exhaustive list of the timezone names that can be used.

>>> from pytz import all_timezones
>>> len(all_timezones) >= 500
True
>>> 'Etc/Greenwich' in all_timezones
True

``common_timezones`` is a list of useful, current timezones.
It doesn't contain deprecated zones or historical zones, except for a few I've deemed in common usage, such as US/Eastern (open a bug report if you think other timezones are deserving of being included here). It is also a sequence of strings. >>> from pytz import common_timezones >>> len(common_timezones) < len(all_timezones) True >>> 'Etc/Greenwich' in common_timezones False >>> 'Australia/Melbourne' in common_timezones True >>> 'US/Eastern' in common_timezones True >>> 'Canada/Eastern' in common_timezones True >>> 'Australia/Yancowinna' in all_timezones True >>> 'Australia/Yancowinna' in common_timezones False Both ``common_timezones`` and ``all_timezones`` are alphabetically sorted: >>> common_timezones_dupe = common_timezones[:] >>> common_timezones_dupe.sort() >>> common_timezones == common_timezones_dupe True >>> all_timezones_dupe = all_timezones[:] >>> all_timezones_dupe.sort() >>> all_timezones == all_timezones_dupe True ``all_timezones`` and ``common_timezones`` are also available as sets. >>> from pytz import all_timezones_set, common_timezones_set >>> 'US/Eastern' in all_timezones_set True >>> 'US/Eastern' in common_timezones_set True >>> 'Australia/Victoria' in common_timezones_set False You can also retrieve lists of timezones used by particular countries using the ``country_timezones()`` function. It requires an ISO-3166 two-letter country code. >>> from pytz import country_timezones >>> print(' '.join(country_timezones('ch'))) Europe/Zurich >>> print(' '.join(country_timezones('CH'))) Europe/Zurich Internationalization - i18n/l10n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pytz is an interface to the IANA database, which uses ASCII names. The `Unicode Consortium's Unicode Locales (CLDR) <http://cldr.unicode.org>`_ project provides translations. Thomas Khyn's `l18n <https://pypi.org/project/l18n/>`_ package can be used to access these translations from Python. License ~~~~~~~ MIT license. This code is also available as part of Zope 3 under the Zope Public License, Version 2.1 (ZPL). I'm happy to relicense this code if necessary for inclusion in other open source projects. Latest Versions ~~~~~~~~~~~~~~~ This package will be updated after releases of the Olson timezone database. The latest version can be downloaded from the `Python Package Index <https://pypi.org/project/pytz/>`_. The code that is used to generate this distribution is hosted on launchpad.net and available using git:: git clone https://git.launchpad.net/pytz A mirror on github is also available at https://github.com/stub42/pytz Announcements of new releases are made on `Launchpad <https://launchpad.net/pytz>`_, and the `Atom feed <http://feeds.launchpad.net/pytz/announcements.atom>`_ hosted there. Bugs, Feature Requests & Patches ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bugs can be reported using `Launchpad Bugs <https://bugs.launchpad.net/pytz>`_. Security Issues ~~~~~~~~~~~~~~~ Reports about security issues can be made via `Tidelift <https://tidelift.com/security>`_. Issues & Limitations ~~~~~~~~~~~~~~~~~~~~ - Offsets from UTC are rounded to the nearest whole minute, so timezones such as Europe/Amsterdam pre-1937 will be up to 30 seconds out. This is a limitation of the Python datetime library. - If you think a timezone definition is incorrect, I probably can't fix it. pytz is a direct translation of the Olson timezone database, and changes to the timezone definitions need to be made to this source. If you find errors they should be reported to the time zone mailing list, linked from http://www.iana.org/time-zones.
Further Reading ~~~~~~~~~~~~~~~ More info than you want to know about timezones: http://www.twinsun.com/tz/tz-link.htm Contact ~~~~~~~ Stuart Bishop <stuart@stuartbishop.net>
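The localize() behaviour documented above lends itself to a small defensive helper. Below is a minimal sketch, not part of pytz itself: the helper name and the fall-back policy (prefer DST) are illustrative assumptions. It asks pytz to be strict first, then resolves ambiguous or non-existent wall-clock times with an explicit is_dst choice.

# Minimal sketch: strict localization with an explicit fallback.
# The helper name and the fallback policy are assumptions, not pytz API.
from datetime import datetime
import pytz

def localize_or_fallback(tz, naive_dt, fallback_is_dst=True):
    """Try strict localization; fall back to an explicit is_dst choice."""
    try:
        return tz.localize(naive_dt, is_dst=None)  # raises on ambiguity/gaps
    except (pytz.exceptions.AmbiguousTimeError,
            pytz.exceptions.NonExistentTimeError):
        return tz.localize(naive_dt, is_dst=fallback_is_dst)

eastern = pytz.timezone('US/Eastern')
print(localize_or_fallback(eastern, datetime(2002, 10, 27, 1, 30)))  # ambiguous time
print(localize_or_fallback(eastern, datetime(2002, 4, 7, 2, 30)))    # non-existent time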
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/RECORD
Flask_SQLAlchemy-2.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 Flask_SQLAlchemy-2.4.3.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 Flask_SQLAlchemy-2.4.3.dist-info/METADATA,sha256=ovbU56jv4wt8BHPWByT4oFVfHGHYxrCgAyxpBzYX7QU,3128 Flask_SQLAlchemy-2.4.3.dist-info/RECORD,, Flask_SQLAlchemy-2.4.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 Flask_SQLAlchemy-2.4.3.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 Flask_SQLAlchemy-2.4.3.dist-info/top_level.txt,sha256=w2K4fNNoTh4HItoFfz2FRQShSeLcvHYrzU_sZov21QU,17 flask_sqlalchemy/__init__.py,sha256=VnC-WvdxWXy5X3_AGO9vIHnuXLY1zUmcuvPvDUcvGtA,39265 flask_sqlalchemy/__pycache__/__init__.cpython-39.pyc,, flask_sqlalchemy/__pycache__/_compat.cpython-39.pyc,, flask_sqlalchemy/__pycache__/model.cpython-39.pyc,, flask_sqlalchemy/__pycache__/utils.cpython-39.pyc,, flask_sqlalchemy/_compat.py,sha256=yua0ZSgVWwi56QpEgwaPInzkNQ9PFb7YQdvEk3dImXo,821 flask_sqlalchemy/model.py,sha256=9jBoPU1k0c4nqz2-KyYnfoE55n-1G8Zxfo2Z-ZHV0v4,4992 flask_sqlalchemy/utils.py,sha256=4eHqAbYElnJ3NbSAHhuINckoAHDABoxjleMJD0iKgyg,1390
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/WHEEL
Wheel-Version: 1.0 Generator: bdist_wheel (0.34.2) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/LICENSE.rst
Copyright 2010 Pallets Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/top_level.txt
flask_sqlalchemy
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Flask_SQLAlchemy-2.4.3.dist-info/METADATA
Metadata-Version: 2.1 Name: Flask-SQLAlchemy Version: 2.4.3 Summary: Adds SQLAlchemy support to your Flask application. Home-page: https://github.com/pallets/flask-sqlalchemy Author: Armin Ronacher Author-email: armin.ronacher@active-4.com Maintainer: Pallets Maintainer-email: contact@palletsprojects.com License: BSD-3-Clause Project-URL: Documentation, https://flask-sqlalchemy.palletsprojects.com/ Project-URL: Code, https://github.com/pallets/flask-sqlalchemy Project-URL: Issue tracker, https://github.com/pallets/flask-sqlalchemy/issues Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content Classifier: Topic :: Software Development :: Libraries :: Python Modules Requires-Python: >= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.* Requires-Dist: Flask (>=0.10) Requires-Dist: SQLAlchemy (>=0.8.0) Flask-SQLAlchemy ================ Flask-SQLAlchemy is an extension for `Flask`_ that adds support for `SQLAlchemy`_ to your application. It aims to simplify using SQLAlchemy with Flask by providing useful defaults and extra helpers that make it easier to accomplish common tasks. Installing ---------- Install and update using `pip`_: .. code-block:: text $ pip install -U Flask-SQLAlchemy A Simple Example ---------------- .. code-block:: python from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.sqlite" db = SQLAlchemy(app) class User(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String, unique=True, nullable=False) email = db.Column(db.String, unique=True, nullable=False) db.session.add(User(username="Flask", email="example@example.com")) db.session.commit() users = User.query.all() Links ----- - Documentation: https://flask-sqlalchemy.palletsprojects.com/ - Releases: https://pypi.org/project/Flask-SQLAlchemy/ - Code: https://github.com/pallets/flask-sqlalchemy - Issue tracker: https://github.com/pallets/flask-sqlalchemy/issues - Test status: https://travis-ci.org/pallets/flask-sqlalchemy - Test coverage: https://codecov.io/gh/pallets/flask-sqlalchemy .. _Flask: https://palletsprojects.com/p/flask/ .. _SQLAlchemy: https://www.sqlalchemy.org .. _pip: https://pip.pypa.io/en/stable/quickstart/
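One thing the quick example above glosses over is schema creation: the insert will fail against an empty database unless the tables already exist. A minimal sketch of that missing step, reusing the same model (db.create_all() is standard Flask-SQLAlchemy 2.x; the URI is illustrative):

# Minimal sketch: create the schema before the first insert.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.sqlite"
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, unique=True, nullable=False)
    email = db.Column(db.String, unique=True, nullable=False)

db.create_all()  # creates the user table if it does not exist yet
db.session.add(User(username="Flask", email="example@example.com"))
db.session.commit()
print(User.query.count())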
0
qxf2_public_repos/what-is-confusing-backend
qxf2_public_repos/what-is-confusing-backend/.vscode/launch.json
{ // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ { "type": "pwa-chrome", "request": "launch", "name": "Launch Chrome against localhost", "url": "http://localhost:8080", "webRoot": "${workspaceFolder}" } ] }
0
qxf2_public_repos
qxf2_public_repos/what-do-i-do-next/run.py
""" Run this file to start the 'What next' application """ from whatnext import app if __name__ == '__main__': app.run(debug=True,port=6464)
0
qxf2_public_repos
qxf2_public_repos/what-do-i-do-next/LICENSE
MIT License Copyright (c) 2020 qxf2 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos
qxf2_public_repos/what-do-i-do-next/requirements.txt
astroid==2.4.2 autopep8==1.5.4 certifi==2022.12.7 Click==7.0 colorama==0.4.3 Flask==1.1.2 Flask-SQLAlchemy==2.4.1 future==0.18.2 isort==5.6.4 itsdangerous==1.1.0 Jinja2==2.11.2 lazy-object-proxy==1.4.3 mando==0.6.4 MarkupSafe==1.1.1 mccabe==0.6.1 neobolt==1.7.17 neotime==1.7.4 numpy==1.18.4 pandas==1.0.3 prompt-toolkit==2.0.10 py2neo==5.0b1 pycodestyle==2.6.0 Pygments==2.3.1 pylint==2.6.0 python-dateutil==2.8.1 pytz==2019.3 radon==4.3.2 six==1.14.0 SQLAlchemy==1.3.16 toml==0.10.1 typed-ast==1.4.1 urllib3==1.24.3 wcwidth==0.1.9 Werkzeug==1.0.1 wrapt==1.12.1
0
qxf2_public_repos
qxf2_public_repos/what-do-i-do-next/setup_neo4j_db.py
""" This script setup the neo4j database by reading the CSV file. """ import os from py2neo import Graph, Node, Relationship,NodeMatcher import pandas as pd import conf.db_conf as url if __name__ == "__main__": graph = Graph(url.db_url) all_tag_list = [] final_list = [] graph.run("Match () Return 1 Limit 1") df = pd.read_csv(url.csv_file) graph.delete_all() matcher = NodeMatcher(graph) row_count = len(df) for each_tags in df['tags']: each_tag = each_tags.split('|') tag_data = [x.lower() for x in each_tag] for each_tag_data in tag_data: all_tag_list.append(each_tag_data) final_list=list(set(all_tag_list)) final_list.sort() for i,tags in enumerate(final_list): var_tag = Node("May21tag",tagName=tags) graph.create(var_tag) for i in range(row_count): answer_count = df['answer_count'][i] view_count = df['view_count'][i] tags = df['tags'][i] each_tag = tags.split('|') tag_data = [x.lower() for x in each_tag] for j,first_tag in enumerate(tag_data[:-1]): match_first_tag = matcher.match("May21tag",tagName=first_tag).first() #Adding the answer count and view count to the nodes match_first_tag['answer_count']=answer_count.item() match_first_tag['view_count']=view_count.item() graph.push(match_first_tag) for second_tag in tag_data[j+1:]: my_tag = second_tag second_tag = matcher.match("May21tag",tagName=second_tag).first() #Adding the answer count and view count to the nodes second_tag['answer_count']=answer_count.item() second_tag['view_count']=view_count.item() graph.push(second_tag) create_relationship = Relationship(match_first_tag ,"tagged",second_tag) graph.merge(create_relationship) #Check how many relations are there between the nodes check_exists =len(graph.match(nodes=[match_first_tag,second_tag],r_type="tagged"))
0
qxf2_public_repos
qxf2_public_repos/what-do-i-do-next/README.md
# what-do-i-do-next Suggestions on what tech you should learn based on the tech you already know. The guesses are based on StackOverflow data. # Setup and run 1. Install a Python 3.x version 2. Install the Neo4j Community edition from [here](https://neo4j.com/download-center/#community) 3. Add the paths for both Python and Neo4j 4. Start the Neo4j server from your command prompt with "neo4j console" 5. Open Neo4j at http://localhost:7474 and set the username/password 6. Run the local server with "python run.py" 7. Open your browser and visit http://localhost:6464
0
qxf2_public_repos/what-do-i-do-next
qxf2_public_repos/what-do-i-do-next/whatnext/models.py
""" This module contains the db table models for the 'What next' application """ from py2neo import Graph, Node, Relationship,NodeMatcher import conf.db_conf as url class Tags: "Model the tags" def __init__(tech): tech_word = tech def fetch_nodes(self,tech_word): graph = Graph(url.db_url) matcher = NodeMatcher(graph) match_nodes = matcher.match(tagName=tech_word).first() if match_nodes == None: return None return (list(r.end_node["tagName"] for r in graph.match(nodes=(match_nodes,)).limit(6)))
0
qxf2_public_repos/what-do-i-do-next
qxf2_public_repos/what-do-i-do-next/whatnext/__init__.py
""" This module contains the initial Flask configuration for the 'What next' application """ import os from flask import Flask from flask_sqlalchemy import SQLAlchemy import conf.db_conf as url app = Flask(__name__) db_file = url.db_file app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///%s"%db_file db = SQLAlchemy(app) from whatnext import routes
0
qxf2_public_repos/what-do-i-do-next
qxf2_public_repos/what-do-i-do-next/whatnext/routes.py
""" This file contains all the endpoints for the 'What next' application """ from flask import render_template, jsonify, request from whatnext import app from whatnext.models import Tags @app.route("/",methods=["GET","POST"]) def home(): "Return the index page" if request.method == 'GET': return render_template("index.html") if request.method == 'POST': tags_object = Tags() query_string = request.form.get('terms') technologies = query_string.split(',') associated_tech_graph = {'nodes':[],'edges':[]} for tech in technologies: tech = tech.strip().lower() tech_pair_rows = tags_object.fetch_nodes(tech) if tech_pair_rows == None: data = 'error' break else: if tech not in associated_tech_graph['nodes']: associated_tech_graph['nodes'].append(tech) for row in tech_pair_rows: if row not in associated_tech_graph['nodes']: associated_tech_graph['nodes'].append(row) if [row,tech] not in associated_tech_graph['edges']: associated_tech_graph['edges'].append([row,tech]) data = 'Success' return jsonify({'graph':associated_tech_graph,'data':data})
0
qxf2_public_repos/what-do-i-do-next/whatnext/static
qxf2_public_repos/what-do-i-do-next/whatnext/static/css/whatnext_style.css
/*Stylesheet for Whatnext*/ body { padding-top: 10px; font-size: 16px; font-family: "Open Sans",serif; } h1 { font-family: "Abel", Arial, sans-serif; font-weight: 400; font-size: 40px; color: #000000; line-height: 1.0; } h2 { font-family: "Abel", Arial, sans-serif; } h3 { font-family: "Abel", Arial, sans-serif; color: #5cb85cff; } p { line-height: 2.0; } li { line-height: 2.0; } /*Src: https://stackoverflow.com/questions/5127937/how-to-center-canvas-in-html5 */ canvas { padding-left: 0; padding-right: 0; margin-left: auto; margin-right: auto; display: block; } /* Override B3 .panel adding a subtly transparent background */ .panel { background-color: rgba(255, 255, 255, 0.9); } .toppy { padding-top: 30px; } .comment { /* Src: http://codepen.io/martinwolf/pen/qlFdp */ display: block; /* Fallback for non-webkit */ display: -webkit-box; margin: 0 auto; -webkit-line-clamp: 2; -webkit-box-orient: vertical; overflow: hidden; text-overflow: ellipsis; } .p_result{ line-height: 1.8; } .margin-base-vertical { margin: 10px 0; } .whatnext_copyright { font-family: "Abel", Arial, sans-serif; font-size: 12px; } /*Add vertical space above divs*/ .top-space-5 { margin-top:5px; } .top-space-10 { margin-top:10px; } .top-space { margin-top:10px; } .top-space-15 { margin-top:15px; } .top-space-20 { margin-top:20px; } .top-space-25 { margin-top:25px; } .top-space-30 { margin-top:30px; } .top-space-40 { margin-top:40px; } .top-space-50 { margin-top:50px; } /*Logo*/ img.logo{ max-height: 100px; max-width: 250px; } .grey-text { color: #7e7e7e; margin-top: 5px; } .toppy-10 { padding-top: 10px; } .toppy-20 { padding-top: 20px; } .toppy-30 { padding-top: 30px; }
0
qxf2_public_repos/what-do-i-do-next/whatnext
qxf2_public_repos/what-do-i-do-next/whatnext/templates/index.html
<html> <head> <title>What next?</title> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <!-- Bootstrap --> <link href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css" rel="stylesheet" /> <link href="https://netdna.bootstrapcdn.com/font-awesome/3.2.1/css/font-awesome.css" rel="stylesheet" /> <link href="https://fonts.googleapis.com/css?family=Abel|Open+Sans:400,600" rel="stylesheet"> <script src="https://code.jquery.com/jquery-3.4.1.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/springy/2.8.0/springy.min.js" integrity="sha256-WmPKL3JQrpEu+5WvCq526urQCByODLWxLk4hLTZdzA0=" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/springy/2.8.0/springyui.min.js" integrity="sha256-p9lwSD4M2dEw39+oiqJdXwnrWKgUPUg63X4fTaA28Ck=" crossorigin="anonymous"></script> <link href="/static/css/whatnext_style.css" rel="stylesheet"> </head> <body> <div class="container"> <div class="row"> <div class="col-md-6 col-md-offset-3"> <h3 class="margin-base-vertical text-center">Enter a few technologies that you know</h3> <input type="text" id="searchBar" class="form-control input-lg" name="number" placeholder="Enter comma separated technologies you know" required> <p class="text-center top-space-10"> <button class="btn btn-success btn-lg" id="displayGraphButton">What do I learn next?</button> </p> <p class="text-center top-space-20"> &nbsp; </p> </div> </div> </div> <script> $("#displayGraphButton").click(function(){ var validValue = true; var letters = /^([a-z,A-Z0-9 _-]+)$/ if (document.contains(document.getElementById("springyGraph"))) { document.getElementById("springyGraph").remove();} var searchBar = $("#searchBar").val(); var canvas = document.createElement("canvas"); canvas.id = "springyGraph"; canvas.width = 0.5*window.innerWidth; canvas.height = 0.5*canvas.width; document.body.appendChild(canvas); var terms = document.getElementById("searchBar").value; var graph = new Springy.Graph(); var isValid = letters.test(searchBar) if(!isValid){ $("#string").remove(); $("#searchBar").after('<span id="string" style="color:red">Please enter a valid string</span>') validValue = false } if (validValue == true){ $.ajax({ type: 'POST', url: '/', data: {'terms':terms}, success: function (result) { if (result.data == 'Success'){ graph.loadJSON(result['graph']); springy = jQuery('#springyGraph').springy({graph: graph}); } else{ alert("Enter a valid tech word") }} }); } }); </script> </body> </html>
0
qxf2_public_repos/what-do-i-do-next
qxf2_public_repos/what-do-i-do-next/conf/db_conf.py
""" This file contains the neo4j DB URL """ import os db_url = "http://username:password@127.0.0.1/db/data" csv_file = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data/stackoverflow_data.csv')) db_file = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data','whatnext.db'))
0
qxf2_public_repos
qxf2_public_repos/qxf2-lambdas/conftest.py
# conftest.py """ Defining pytest session details """ import pytest import time def pytest_sessionstart(session): """ pytest session start """ session.results = {} @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """ Running wrapper method """ outcome = yield result = outcome.get_result() if result.when == 'call': item.session.results[item] = result def pytest_sessionfinish(session,exitstatus): """ pytest session finish """ print('\n---------printing test results------------') reporter = session.config.pluginmanager.get_plugin('terminalreporter') duration = time.time() - reporter._sessionstarttime passed_amount = sum(1 for result in session.results.values() if result.passed) failed_amount = sum(1 for result in session.results.values() if result.failed) print(f'=== {passed_amount} passed and {failed_amount} failed tests in {duration:.2f} seconds===') print('------------------------------------------')
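A minimal companion test module is enough to see the custom summary above in action; the file name and assertions are illustrative:

# Illustrative test file (e.g. tests/test_sample.py).
def test_always_passes():
    assert 1 + 1 == 2

def test_always_fails():
    assert 1 + 1 == 3

Running python -m pytest on it would end with a line like "=== 1 passed and 1 failed tests in 0.05 seconds===" printed by pytest_sessionfinish.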
0
qxf2_public_repos
qxf2_public_repos/qxf2-lambdas/LICENSE
MIT License Copyright (c) 2020 qxf2 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos
qxf2_public_repos/qxf2-lambdas/README.md
## A collection of lambdas used at Qxf2 The AWS lambda functions used by Qxf2 live in this repo. Each lambda has its own directory. Optionally, each lambda might have one GitHub Action related to its deployment. ### Context We are really not sure how to distribute code among repos when maintaining a microservices style of architecture. We use a few AWS lambda functions within Qxf2. As of July 2020, they were in the repos that needed them. We moved all the lambdas into one repo to try out how this style of distributing code would work out for us. #### Prerequisites for running unit tests and end to end tests 1. Please make sure that you have set up the AWS credentials of the test account, i.e. `Region`, `Access Key` and `Secret access key`, in the `~/.aws/credentials` file 2. Create an SQS queue which is subscribed to the `qxf2-skype-listner` SNS topic. 3. Mention the SQS name in the `sqs_utilities_conf.py` file in the `QUEUE_URL_LIST`. 4. The minimum Python version requirement is 3.7+ ### How to run end to end tests Please run the tests using the command `python -m pytest tests/test_end_to_end_employee_skype_message.py -sv` ### How to run unit tests You can run the tests using the command `coverage run -m pytest tests/test_integration_daily_messages_lambda.py`. Before running the tests please ensure that you have set up `aws_access_key_id`, `aws_secret_access_key` and `region` in the `credentials` file located under the `.aws` folder. In case this folder is not available, please create the folder in the root directory and add the `credentials` file there.
0
qxf2_public_repos/qxf2-lambdas
qxf2_public_repos/qxf2-lambdas/calendar_event/query_employees.py
""" Query the Employee GraphQL endpoint """ import os import requests BASE_URL = 'https://qxf2-employees.qxf2.com/graphql' def authenticate(): """ Return an authenticate code Params: None Returns: accessToken : string """ query = f"""mutation {{ auth(password: "{os.environ['calendar_password']}", username: "{os.environ['calendar_username']}") {{ accessToken refreshToken }} }} """ response = requests.post(url = BASE_URL, json = {'query': query}, timeout = 30) return response.json().get('data',{}).get('auth',{}).get('accessToken',None) def get_valid_emails(): """ Returns active employee email IDs Params: None Returns: employee_emails : list """ employee_emails = [] employees = query_all() for employee_node in employees: if employee_node['node']['isActive'] == 'Y': employee_emails.append(employee_node['node']['email']) return employee_emails def query_all(): """ allEmployees query for the GraphQL service Params: None Returns: all_employees : list of dicts """ query = """query findAllEmployees{ allEmployees{ edges{ node{ email employeeId dateJoined isActive blogAuthorName } } } }""" access_token = authenticate() headers = {'Authorization': f'Bearer {access_token}'} response = requests.get(url = BASE_URL, json = {'query': query}, headers = headers, timeout = 30) all_employees = response.json().get('data', {}).get('allEmployees', {}).get('edges', []) return all_employees
0
qxf2_public_repos/qxf2-lambdas
qxf2_public_repos/qxf2-lambdas/calendar_event/requirements.txt
google-auth-oauthlib>=0.5.2 google-api-python-client>=2.58.0
0
qxf2_public_repos/qxf2-lambdas
qxf2_public_repos/qxf2-lambdas/calendar_event/create_calendar_event.py
""" Lambda to - Create an all-day Google Calender event when event title, date and email are provided """ import json import os from google.oauth2 import service_account import googleapiclient.discovery import query_employees as gql SCOPES = ['https://www.googleapis.com/auth/calendar'] CLIENT_SECRET_FILE = json.loads(os.environ['client_secret_google_calendar']) def authenticate(email): """ Sets the required credentials to access the Google Calendar API Params: email : string Returns: Google Calendar API service object Raises: Exception for authentication issues if any """ credentials = service_account.Credentials.from_service_account_info(CLIENT_SECRET_FILE, scopes=SCOPES) delegated_credentials = credentials.with_subject(email) service = googleapiclient.discovery.build('calendar', 'v3', credentials=delegated_credentials, cache_discovery=False) return service def validate_email(emp_email): """ Validates email id against GraphQL service namely, qxf2-employees Params: emp_email : string Returns: if valid, emp_email : string Raises: Exception if invalid email """ emails = gql.get_valid_emails() if emp_email in emails: return emp_email else: raise Exception('Invalid email!') def create_event(emp_email, event_title, event_date): """ Creates Google calendar event, as per the details provided in the SQS message body Params: emp_email : string event_title : string event_date : string Returns: None prints event link on successful calendar event creation Raises: Exception if calendar event creation errors out """ service = authenticate(emp_email) event = { 'summary': event_title, 'start': { 'date': event_date }, 'end': { 'date': event_date }, 'attendees': [ {'email': emp_email} ] } calendar_event = service.events().insert(calendarId='primary', body=event).execute() print(f"Event created: {calendar_event.get('htmlLink')}") return calendar_event.get('htmlLink') def lambda_handler(event, context): """ Lambda entry point Params: event : dict context : dict Returns: response : dict Raises: General exceptions """ message = {} try: email = event['queryStringParameters']['email'] title = event['queryStringParameters']['title'] date_of_event = event['queryStringParameters']['date_of_event'] print(f'\nLambda inputs: {email}, {title}, {date_of_event}\n') if email: email = validate_email(email) if title and date_of_event: calendar_link = create_event(email, title, date_of_event) message = { 'message': calendar_link } else: print("Invalid details provided! Please provide valid email, title and date to create calendar event.") except Exception as error: raise Exception('Experienced issues while creating the Google Calendar event!') from error return { 'statusCode': 200, 'headers': {'Content-Type': 'application/json'}, 'body': json.dumps(message) }
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_webapps_scores/webapps_scores.py
""" Lambda to calculate and store the sub-streams scores for webapps. """ import datetime import os import sys from decimal import Decimal sys.path.append(os.path.dirname(os.path.abspath(__file__))) import dynamodb_functions as df def get_deltas_sum(substreams_deltas): "Returns sum of all deltas for the substreams." delta_users_sum = 0 for substream_deltas in substreams_deltas: delta_users_sum = delta_users_sum + int(substream_deltas['users_count']) return delta_users_sum def calculate_substreams_scores(): "Calculates the webapps substreams scores." substreams_scores = [] yesterday = (datetime.datetime.now()-datetime.timedelta(1)).strftime('%Y-%m-%d') # Get yesterday's deltas for all webapp sub-streams. yesterday_substreams = df.read_substreams(os.environ['WEBAPP_SUBSTREAMS_TABLE'], yesterday) yesterday_substreams_deltas = df.read_webapps_table(os.environ['WEBAPP_DELTAS_TABLE'], \ yesterday, yesterday_substreams) delta_users_sum = get_deltas_sum(yesterday_substreams_deltas) for substream_deltas in yesterday_substreams_deltas: if delta_users_sum == 0: substreams_scores.append({'date':substream_deltas['date'],\ 'webapp_name':substream_deltas['webapp_name'],\ 'webapp_score':Decimal(0)}) else: substreams_scores.append({'date':substream_deltas['date'],\ 'webapp_name':substream_deltas['webapp_name'],\ 'webapp_score':Decimal(str(round(\ int(substream_deltas['users_count'])/delta_users_sum, 4)))}) store_substreams_scores(substreams_scores) def store_substreams_scores(substreams_scores): "Stores the webapps sub-streams scores into DynamoDB." df.write_into_table(substreams_scores, os.environ['WEBAPP_SCORES_TABLE']) def lambda_handler(event, context): "Lambda entry point." calculate_substreams_scores() return "Calculated and stored yesterday's webapps scores, successfully into DynamoDB."
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_webapps_scores/dynamodb_functions.py
""" Module to contain DynamoDB operations related to webapps sub-streams scores calculation functionality of QElo. """ import boto3 from boto3.dynamodb.conditions import Key from botocore.exceptions import ClientError def init_table(table_name): "Initializes and returns the DynamoDB table resource." dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(table_name) return table def extract_substream_names(substreams_data, key): "Extracts the name of substreams from the complete substreams data." substreams = [substream_data[key] for substream_data in substreams_data] return substreams def read_substreams(table_name, date): "Retrieves substreams from the corresponding DynamoDB table." # Initialize the DynamoDB table resource. table = init_table(table_name) try: filtering_exp = Key('date').eq(date) response = table.query(KeyConditionExpression=filtering_exp) data = extract_substream_names(response['Items'], 'webapp_name') if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date} !') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def read_webapps_table(table_name, date, substreams): "Retrieves items that match the criteria, from corresponding webapps DynamoDB table." data = [] #list of dicts # Initialize the DynamoDB table resource. table = init_table(table_name) try: for substream in substreams: response = table.get_item( Key={\ 'date':date,\ 'webapp_name':substream\ }\ ) data.append(response['Item']) if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date} !') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def write_into_table(items, table_name): "Writes items/records into DynamoDB table." # Initialise a DynamoDB table resource. table = init_table(table_name) try: with table.batch_writer() as batch: for item in items: batch.put_item(Item=item) print(f"\n Added yesterday's records successfully into DynamoDB table {table_name}.") except ClientError as dynamodb_error: print(f'\n Error while writing into table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while inserting data into table.') from error
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_github_deltas/github_deltas.py
""" Calculate and store the delta values for the GitHub stats collected. - DynamoDB table format being: date, repo_name, delta_stars, delta_forks, delta_clones, delta_visitors. Note: delta is the change per day on collected stats. """ import datetime import json import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import dynamodb_functions as df def extract_new_substreams(current_day_substreams, previous_day_substreams): "Returns new substreams added, during the current day." new_substreams = list(set(current_day_substreams)-set(previous_day_substreams)) return new_substreams def amend_yesterday_repo_stats(yesterday, current_day_substreams,\ new_substreams, yesterday_repos_stats): "Inserts previous day stats as 0 for newly added repos." repo_index = [index for index, repo in enumerate(current_day_substreams)\ if repo in set(new_substreams)] for index, substream in zip(repo_index, new_substreams): yesterday_repos_stats.insert(index, {'date':yesterday, 'repo_name':substream,\ 'stars':0, 'forks':0,\ 'clones':0, 'visitors':0}) return yesterday_repos_stats def calculate_repo_deltas(): "Calculates the deltas for the repo stats." deltas = [] # list of dicts yesterday = (datetime.datetime.now()-datetime.timedelta(1)).strftime('%d-%b-%Y') today = datetime.datetime.now().strftime('%d-%b-%Y') previous_day_substreams = df.read_substreams(os.environ["GITHUB_SUBSTREAMS_TABLE"], yesterday) current_day_substreams = df.read_substreams(os.environ["GITHUB_SUBSTREAMS_TABLE"], today) yesterday_repos_stats = df.read_github_table(\ os.environ["GITHUB_STATS_TABLE"], yesterday, previous_day_substreams) today_repos_stats = df.read_github_table(\ os.environ["GITHUB_STATS_TABLE"], today, current_day_substreams) if len(current_day_substreams) > len(previous_day_substreams): new_substreams = extract_new_substreams(current_day_substreams, previous_day_substreams) yesterday_repos_stats = amend_yesterday_repo_stats(yesterday, current_day_substreams,\ new_substreams, yesterday_repos_stats) for (t_stats, y_stats) in zip(today_repos_stats, yesterday_repos_stats): deltas.append({'date': t_stats['date'], 'repo_name': t_stats['repo_name'],\ 'delta_stars': int(t_stats['stars'])-int(y_stats['stars']),\ 'delta_forks': int(t_stats['forks'])-int(y_stats['forks']),\ 'delta_clones': t_stats['clones'],\ 'delta_visitors': t_stats['visitors']}) store_repo_deltas(deltas) def store_repo_deltas(repos_deltas): "Stores the deltas of GitHub repos stats into DynamoDB." df.write_into_table(repos_deltas, os.environ["GITHUB_DELTAS_TABLE"]) def lambda_handler(event, context): calculate_repo_deltas() response = { 'statusCode': 200, 'body': json.dumps({ 'message' : 'github-deltas lambda executed successully!', 'deltas_status_flag' : 1}) } return response
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_github_deltas/dynamodb_functions.py
""" Module to contain the DynamoDB functions. """ import re import boto3 from boto3.dynamodb.conditions import Key from botocore.exceptions import ClientError def init_table(table_name): "Initializes and returns the DynamoDB table resource" dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(table_name) return table def extract_substream_names(substreams_data, key): "Extracts the name of substreams from the complete substreams data." substreams = [substream_data[key] for substream_data in substreams_data] return substreams def read_substreams(table_name, date): "Retrieves substreams from the corresponding DynamoDB table." try: table = init_table(table_name) filtering_exp = Key('date').eq(date) response = table.query(KeyConditionExpression=filtering_exp) if re.search(r'^github', table_name): data = extract_substream_names(response['Items'], 'repo_name') else: data = extract_substream_names(response['Items'], 'image_name') print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") return data except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') except Exception: raise Exception('Exception while reading data from table.') def read_github_table(table_name, date, substreams): "Retrieves items that match the criterias, from corresponding GitHub DynamoDB table." data = [] #list of dicts try: table = init_table(table_name) for substream in substreams: response = table.get_item( Key={\ 'date':date,\ 'repo_name':substream\ }\ ) data.append(response['Item']) print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") return data except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') except Exception: raise Exception('Exception while reading data from table.') def write_into_table(items, table_name): "Writes items/records into DynamoDB table" try: table = init_table(table_name) with table.batch_writer() as batch: for item in items: batch.put_item(Item=item) print(f"Added today's records successfully into DynamoDB table {table_name}!") except ClientError as dynamodb_error: print(f'\n Error while writing into table {table_name} :\n {dynamodb_error.response}') except Exception as error: print(f'Exception while inserting data into {table_name} table :\n {error}')
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_github_scores/github_scores.py
""" Lambda function to store the GitHub substreams scores into DynamoDB every night. """ import datetime import json import os from decimal import Decimal import dynamodb_functions as df def get_deltas_sum(substreams_deltas): "Returns sum of all deltas for the GitHub substreams." delta_stars_sum = 0 delta_forks_sum = 0 delta_clones_sum = 0 delta_visitors_sum = 0 deltas_sum = [] for substream_deltas in substreams_deltas: delta_stars_sum = delta_stars_sum + int(substream_deltas['delta_stars']) delta_forks_sum = delta_forks_sum + int(substream_deltas['delta_forks']) delta_clones_sum = delta_clones_sum + int(substream_deltas['delta_clones']) delta_visitors_sum = delta_visitors_sum + int(substream_deltas['delta_visitors']) deltas_sum.insert(0, delta_stars_sum) deltas_sum.insert(1, delta_forks_sum) deltas_sum.insert(2, delta_clones_sum) deltas_sum.insert(3, delta_visitors_sum) return deltas_sum def get_stars_weight(today_substreams_deltas, delta_stars_sum): "Returns the stars weight for the current day for all GitHub sub-streams." stars_weight = [] if delta_stars_sum <= 0: stars_weight = [0 for substream_deltas in today_substreams_deltas] else: stars_weight = [round((int(substream_deltas['delta_stars'])/delta_stars_sum), 4)\ for substream_deltas in today_substreams_deltas] return stars_weight def get_forks_weight(today_substreams_deltas, delta_forks_sum): "Returns the forks weight for the current day for all GitHub sub-streams." forks_weight = [] if delta_forks_sum <= 0: forks_weight = [0 for substream_deltas in today_substreams_deltas] else: forks_weight = [round(int((substream_deltas['delta_forks'])/delta_forks_sum), 4)\ for substream_deltas in today_substreams_deltas] return forks_weight def get_clones_weight(today_substreams_deltas, delta_clones_sum): "Returns the clones weight for the current day for all GitHub sub-streams." clones_weight = [] if delta_clones_sum == 0: clones_weight = [0 for substream_deltas in today_substreams_deltas] else: clones_weight = [round((int(substream_deltas['delta_clones'])/delta_clones_sum), 4)\ for substream_deltas in today_substreams_deltas] return clones_weight def get_visitors_weight(today_substreams_deltas, delta_visitors_sum): "Returns the visitors weight for the current day for all GitHub sub-streams." visitors_weight = [] if delta_visitors_sum == 0: visitors_weight = [0 for substream_deltas in today_substreams_deltas] else: visitors_weight = [round((int(substream_deltas['delta_visitors'])/delta_visitors_sum), 4)\ for substream_deltas in today_substreams_deltas] return visitors_weight def calculate_substreams_scores(): "Calculates the scores for all GitHub repos/sub-streams." substreams_scores = [] # list of dicts. deltas_sum = [] today = datetime.datetime.now().strftime('%d-%b-%Y') # Get current day deltas for all github sub-streams. current_day_substreams = df.read_substreams(os.environ["GITHUB_SUBSTREAMS_TABLE"], today) current_day_substreams_deltas = df.read_github_table(os.environ["GITHUB_DELTAS_TABLE"], today,\ current_day_substreams) # Get the sum of all substreams deltas. deltas_sum = get_deltas_sum(current_day_substreams_deltas) # Get the current day GitHub metric weights. stars_weight = get_stars_weight(current_day_substreams_deltas,\ deltas_sum[0]) forks_weight = get_forks_weight(current_day_substreams_deltas,\ deltas_sum[1]) clones_weight = get_clones_weight(current_day_substreams_deltas,\ deltas_sum[2]) visitors_weight = get_visitors_weight(current_day_substreams_deltas,\ deltas_sum[3]) # Calculate the current day GitHub sub-streams scores. 
for substream_deltas, repo_stars_weight, repo_forks_weight, \ repo_clones_weight, repo_visitors_weight \ in zip(current_day_substreams_deltas, stars_weight,\ forks_weight, clones_weight, visitors_weight): substreams_scores.append({'date': substream_deltas['date'],\ 'repo_name': substream_deltas['repo_name'],\ 'repo_score': Decimal(str(round(0.25 * \ (repo_stars_weight\ + repo_forks_weight \ + repo_clones_weight \ + repo_visitors_weight), 4)))}) return substreams_scores def store_substreams_scores(): "Stores the GitHub sub-stream scores into DynamoDB." substreams_scores = [] substreams_scores = calculate_substreams_scores() df.write_into_table(substreams_scores, os.environ["GITHUB_SCORES_TABLE"]) def lambda_handler(event, context): "Lambda entry point." store_substreams_scores() response = { 'statusCode': 200, 'body': json.dumps({ 'message' : 'github-scores lambda executed successfully!', 'scores_status_flag' : 1}) } return response
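A worked numeric example of the final scoring step, with made-up weights for a single repo; each weight is that repo's share of the corresponding metric's deltas across all repos, and the 0.25 factor averages the four weights:

# Worked example of the scoring formula (weights are made up).
stars_w, forks_w, clones_w, visitors_w = 0.5, 0.25, 0.1, 0.15
repo_score = round(0.25 * (stars_w + forks_w + clones_w + visitors_w), 4)
print(repo_score)  # 0.25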
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_github_scores/dynamodb_functions.py
""" Module to contain the DynamoDB operations related to QElo. """ import re import boto3 from boto3.dynamodb.conditions import Key from botocore.exceptions import ClientError def init_table(table_name): "Initializes and returns the DynamoDB table resource." dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(table_name) return table def extract_substream_names(substreams_data, key): "Extracts the name of substreams from the complete substreams data." substreams = [substream_data[key] for substream_data in substreams_data] return substreams def read_substreams(table_name, date): "Retrieves substreams from the corresponding DynamoDB table." # Initialize the DynamoDB table resource. table = init_table(table_name) try: filtering_exp = Key('date').eq(date) response = table.query(KeyConditionExpression=filtering_exp) if re.search(r'^github', table_name): data = extract_substream_names(response['Items'], 'repo_name') elif re.search(r'^docker', table_name): data = extract_substream_names(response['Items'], 'image_name') else: data = extract_substream_names(response['Items'], 'webapp_name') if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date} !') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def read_github_table(table_name, date, substreams): "Retrieves items that match the criteria, from corresponding GitHub DynamoDB table." data = [] #list of dicts # Initialize the DynamoDB table resource. table = init_table(table_name) try: for substream in substreams: response = table.get_item( Key={\ 'date':date,\ 'repo_name':substream\ }\ ) data.append(response['Item']) if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date} !') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def write_into_table(items, table_name): "Writes items/records into DynamoDB table." # Initialize the DynamoDB table resource. table = init_table(table_name) try: with table.batch_writer() as batch: for item in items: batch.put_item(Item=item) print(f"\n Added today's records successfully into DynamoDB table {table_name}.") except ClientError as dynamodb_error: print(f'\n Error while writing into table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while inserting data into table.') from error
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_docker_scores/docker_scores.py
""" Lambda function to store the Docker substreams scores into DynamoDB every night. """ import datetime from decimal import Decimal import docker_conf as dc import dynamodb_functions as df def get_deltas_sum(substreams_deltas): "Returns sum of all deltas for the substreams." delta_pulls_sum = 0 for substream_deltas in substreams_deltas: delta_pulls_sum = delta_pulls_sum + int(substream_deltas['delta_pulls']) return delta_pulls_sum def calculate_substreams_scores(): "Calculates the scores for all Docker Hub images/sub-streams." substreams_scores = [] # list of dicts. today = datetime.datetime.now().strftime('%d-%b-%Y') # Get deltas of all Docker Hub sub-streams for the current day. current_day_substreams = df.read_substreams(dc.DOCKER_SUBSTREAMS_TABLE, today) current_day_substreams_deltas = df.read_docker_table(dc.DOCKER_DELTAS_TABLE, today,\ current_day_substreams) # Get the sum of all substreams deltas. delta_pulls_sum = get_deltas_sum(current_day_substreams_deltas) for substream_deltas in current_day_substreams_deltas: if delta_pulls_sum == 0: substreams_scores.append({'date': substream_deltas['date'],\ 'image_name': substream_deltas['image_name'],\ 'image_score': Decimal(0)}) else: substreams_scores.append({'date': substream_deltas['date'],\ 'image_name': substream_deltas['image_name'],\ 'image_score': Decimal(str(round(\ int(substream_deltas['delta_pulls'])/delta_pulls_sum\ , 4)))}) return substreams_scores def store_substreams_scores(): "Stores the Docker Hub sub-stream scores into DynamoDB." substreams_scores = [] substreams_scores = calculate_substreams_scores() df.write_into_table(substreams_scores, dc.DOCKER_SCORES_TABLE) def lambda_handler(event, context): "Lambda entry point." store_substreams_scores() return "Stored current day Docker substreams scores, successfully into DynamoDB."
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_docker_scores/dynamodb_functions.py
""" Module to contain the DynamoDB operations related to QElo. """ import re import boto3 from boto3.dynamodb.conditions import Key from botocore.exceptions import ClientError def init_table(table_name): "Initializes and returns the DynamoDB table resource." dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(table_name) return table def extract_substream_names(substreams_data, key): "Extracts the name of substreams from the complete substreams data." substreams = [substream_data[key] for substream_data in substreams_data] return substreams def read_substreams(table_name, date): "Retrieves substreams from the corresponding DynamoDB table." # Initialize the DynamoDB table resource. table = init_table(table_name) try: filtering_exp = Key('date').eq(date) response = table.query(KeyConditionExpression=filtering_exp) if re.search(r'^github', table_name): data = extract_substream_names(response['Items'], 'repo_name') elif re.search(r'^docker', table_name): data = extract_substream_names(response['Items'], 'image_name') else: data = extract_substream_names(response['Items'], 'webapp_name') if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date} !') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def read_docker_table(table_name, date, substreams): "Retrieves items that match the criteria, from corresponding Docker DynamoDB table." data = [] #list of dicts # Initialize the DynamoDB table resource. table = init_table(table_name) try: for substream in substreams: response = table.get_item( Key={\ 'date':date,\ 'image_name':substream\ }\ ) data.append(response['Item']) if data: print(f"\n Read records for {date} successfully from DynamoDB table {table_name}.") else: print(f'\n Table {table_name} does not contain matching records for {date}!') raise Exception('\n Error while reading table.') except ClientError as dynamodb_error: print(f'\n Error while reading from table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while reading data from table.') from error return data def write_into_table(items, table_name): "Writes items/records into DynamoDB table." # Initialize the DynamoDB table resource. table = init_table(table_name) try: with table.batch_writer() as batch: for item in items: batch.put_item(Item=item) print(f"\n Added today's records successfully into DynamoDB table {table_name}.") except ClientError as dynamodb_error: print(f'\n Error while writing into table {table_name} : \n {dynamodb_error.response}') raise Exception('Exception encountered and run aborted!.') from dynamodb_error except Exception as error: raise Exception('Exception while inserting data into table.') from error
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_docker_scores/docker_conf.py
"Configuration details realted to Docker Hub stream of QElo." # DynamoDB tables that store Docker Hub related data. DOCKER_SUBSTREAMS_TABLE = 'docker_substreams' DOCKER_DELTAS_TABLE = 'docker_deltas' DOCKER_SCORES_TABLE = 'docker_scores'
0
qxf2_public_repos/qxf2-lambdas/qelo
qxf2_public_repos/qxf2-lambdas/qelo/store_webapps_deltas/webapps_deltas.py
""" Lambda function to - fetch previous day's web application stats/deltas from Google Analytics and - store into DynamoDB daily. Note: 1. To add a new webapp/sub-stream, append it to conf/webapps_conf.json file. 2. Note: We are fetching yesterday's data (GMT) since we want what was there at the end of the day. So, the script is meant to run at 12:00am GMT (ie. 5:30am IST). """ import datetime import json import os import sys sys.path.append((os.path.dirname(os.path.abspath(__file__)))) import dynamodb_functions as df import api_functions as af def normalize_response(response): "Parses Google Analytics Data API v1 response and extracts total users count for web-app." user_count = 0 for row in response.rows: user_count = row.metric_values[0].value return user_count def get_webapps_substreams(): "Fetches the webapps defined in the JSON." root_dir = os.path.dirname(os.path.abspath(__file__)) webapps_substreams_json = os.path.join(root_dir, 'conf', 'webapps_conf.json') with open(webapps_substreams_json, encoding='utf-8') as file: substreams = json.load(file) return substreams def get_webapps_stats(): "Returns the stats for every configured webapps" stats = [] yesterday_date = (datetime.datetime.now()-datetime.timedelta(1)).strftime('%Y-%m-%d') all_apps = get_webapps_substreams() all_apps = all_apps.get('webapps', []) store_substreams(yesterday_date, all_apps) for web_app in all_apps: api_response = af.get_data_api_response(web_app['property_id'], yesterday_date) users_count = normalize_response(api_response) # Build the web apps stats stats.append({'date':yesterday_date,\ 'webapp_name':web_app['app_name'],\ 'users_count':users_count }) store_webapp_stats_as_deltas(stats) def store_substreams(date, web_apps): "Stores the webapp substreams." substreams = [] for web_app in web_apps: substreams.append({'date':date,\ 'webapp_name':web_app['app_name']}) df.write_into_table(substreams, os.environ['WEBAPP_SUBSTREAMS_TABLE']) def store_webapp_stats_as_deltas(stats): "Stores stats for every configured webapps as deltas" df.write_into_table(stats, os.environ['WEBAPP_DELTAS_TABLE']) def lambda_handler(event, context): "Lambda entry point." get_webapps_stats() return "Stored yesterday's webapps stats/deltas, successfully into DynamoDB."
0