repo_id
stringclasses 208
values | file_path
stringlengths 31
190
| content
stringlengths 1
2.65M
| __index_level_0__
int64 0
0
|
---|---|---|---|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/test.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import mimetypes
import sys
from io import BytesIO
from itertools import chain
from random import random
from tempfile import TemporaryFile
from time import time
from ._compat import iteritems
from ._compat import iterlists
from ._compat import itervalues
from ._compat import make_literal_wrapper
from ._compat import reraise
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import wsgi_encoding_dance
from ._internal import _get_environ
from .datastructures import CallbackDict
from .datastructures import CombinedMultiDict
from .datastructures import EnvironHeaders
from .datastructures import FileMultiDict
from .datastructures import Headers
from .datastructures import MultiDict
from .http import dump_cookie
from .http import dump_options_header
from .http import parse_options_header
from .urls import iri_to_uri
from .urls import url_encode
from .urls import url_fix
from .urls import url_parse
from .urls import url_unparse
from .urls import url_unquote
from .utils import get_content_type
from .wrappers import BaseRequest
from .wsgi import ClosingIterator
from .wsgi import get_current_url
try:
from urllib.request import Request as U2Request
except ImportError:
from urllib2 import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError:
from cookielib import CookieJar
def stream_encode_multipart(
    values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset="utf-8"
):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    :param values: a dict or :class:`MultiDict` of form fields and files.
    :param use_tempfile: if true, spill to a :class:`TemporaryFile` once
        more than ``threshold`` bytes have been buffered in memory.
    :param threshold: in-memory buffer size limit before spilling to disk.
    :param boundary: explicit multipart boundary; generated when ``None``.
    :param charset: charset used to encode textual parts.
    :return: a ``(stream, length, boundary)`` tuple; the stream is rewound
        to position 0.
    """
    if boundary is None:
        # time() + random() makes a boundary collision with payload
        # content very unlikely.
        boundary = "---------------WerkzeugFormPart_%s%s" % (time(), random())
    # Mutable closure state: [stream, total_length, on_disk].  A list is
    # used so the nested function can rebind the slots (no ``nonlocal``
    # on Python 2).
    _closure = [BytesIO(), 0, False]

    if use_tempfile:

        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    # Threshold exceeded: migrate everything buffered so
                    # far (plus this chunk) into a real temporary file.
                    new_stream = TemporaryFile("wb+")
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length

    else:
        write_binary = _closure[0].write

    def write(string):
        # Text is encoded with the requested charset before writing.
        write_binary(string.encode(charset))

    if not isinstance(values, MultiDict):
        values = MultiDict(values)

    # NOTE(review): the loop target rebinds ``values``; the outer mapping
    # is no longer accessible after the first iteration (harmless here).
    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key))
            reader = getattr(value, "read", None)
            if reader is not None:
                # File-like part: emit filename/content-type sub-headers
                # and stream the content in 16 KiB chunks.
                filename = getattr(value, "filename", getattr(value, "name", None))
                content_type = getattr(value, "content_type", None)
                if content_type is None:
                    # Guess from the filename, falling back to a generic
                    # binary type.
                    content_type = (
                        filename
                        and mimetypes.guess_type(filename)[0]
                        or "application/octet-stream"
                    )
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write("\r\n")
                write("Content-Type: %s\r\n\r\n" % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                # Plain form field: coerce to bytes and write it out.
                if not isinstance(value, string_types):
                    value = str(value)

                value = to_bytes(value, charset)
                write("\r\n\r\n")
                write_binary(value)
            write("\r\n")
    write("--%s--\r\n" % boundary)

    # Rewind so callers can read the payload from the start.
    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset="utf-8"):
    """In-memory variant of `stream_encode_multipart`.

    Returns a tuple in the form (``boundary``, ``data``) where ``data``
    is the complete multipart payload as a bytestring.
    """
    stream, _length, boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset
    )
    return boundary, stream.read()
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = ["%s=%s" % (c.name, c.value) for c in self]
if cvals:
environ["HTTP_COOKIE"] = "; ".join(cvals)
else:
environ.pop("HTTP_COOKIE", None)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers), U2Request(get_current_url(environ))
)
def _iter_data(data):
    """Iterate over a `dict` or :class:`MultiDict`, yielding every
    ``(key, value)`` pair and flattening multi-value entries.

    This is used to iterate over the data passed to the
    :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, value_list in iterlists(data):
            for item in value_list:
                yield key, item
    else:
        for key, value_or_list in iteritems(data):
            # A plain dict may map a key to a list of values.
            if isinstance(value_or_list, list):
                for item in value_or_list:
                    yield key, item
            else:
                yield key, value_or_list
class EnvironBuilder(object):
    """This class can be used to conveniently create a WSGI environment
    for testing purposes.  It can be used to quickly create WSGI environments
    or request objects from arbitrary data.

    The signature of this class is also used in some other places as of
    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
    :meth:`Client.open`).  Because of this most of the functionality is
    available through the constructor alone.

    Files and regular form data can be manipulated independently of each
    other with the :attr:`form` and :attr:`files` attributes, but are
    passed with the same argument to the constructor: `data`.

    `data` can be any of these values:

    -   a `str` or `bytes` object: The object is converted into an
        :attr:`input_stream`, the :attr:`content_length` is set and you have to
        provide a :attr:`content_type`.
    -   a `dict` or :class:`MultiDict`: The keys have to be strings. The values
        have to be either any of the following objects, or a list of any of the
        following objects:

        -   a :class:`file`-like object:  These are converted into
            :class:`FileStorage` objects automatically.
        -   a `tuple`:  The :meth:`~FileMultiDict.add_file` method is called
            with the key and the unpacked `tuple` items as positional
            arguments.
        -   a `str`:  The string is set as form data for the associated key.
    -   a file-like object: The object content is loaded in memory and then
        handled like a regular `str` or a `bytes`.

    :param path: the path of the request.  In the WSGI environment this will
                 end up as `PATH_INFO`.  If the `query_string` is not defined
                 and there is a question mark in the `path` everything after
                 it is used as query string.
    :param base_url: the base URL is a URL that is used to extract the WSGI
                     URL scheme, host (server name + server port) and the
                     script root (`SCRIPT_NAME`).
    :param query_string: an optional string or dict with URL parameters.
    :param method: the HTTP method to use, defaults to `GET`.
    :param input_stream: an optional input stream.  Do not specify this and
                         `data`.  As soon as an input stream is set you can't
                         modify :attr:`args` and :attr:`files` unless you
                         set the :attr:`input_stream` to `None` again.
    :param content_type: The content type for the request.  As of 0.5 you
                         don't have to provide this when specifying files
                         and form data via `data`.
    :param content_length: The content length for the request.  You don't
                           have to specify this when providing data via
                           `data`.
    :param errors_stream: an optional error stream that is used for
                          `wsgi.errors`.  Defaults to :data:`stderr`.
    :param multithread: controls `wsgi.multithread`.  Defaults to `False`.
    :param multiprocess: controls `wsgi.multiprocess`.  Defaults to `False`.
    :param run_once: controls `wsgi.run_once`.  Defaults to `False`.
    :param headers: an optional list or :class:`Headers` object of headers.
    :param data: a string or dict of form data or a file-object.
                 See explanation above.
    :param json: An object to be serialized and assigned to ``data``.
        Defaults the content type to ``"application/json"``.
        Serialized with the function assigned to :attr:`json_dumps`.
    :param environ_base: an optional dict of environment defaults.
    :param environ_overrides: an optional dict of environment overrides.
    :param charset: the charset used to encode unicode data.

    .. versionadded:: 0.15
        The ``json`` param and :meth:`json_dumps` method.

    .. versionadded:: 0.15
        The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
        the path before perecent-decoding. This is not part of the WSGI
        PEP, but many WSGI servers include it.

    .. versionchanged:: 0.6
       ``path`` and ``base_url`` can now be unicode strings that are
       encoded with :func:`iri_to_uri`.
    """

    #: the server protocol to use.  defaults to HTTP/1.1
    server_protocol = "HTTP/1.1"

    #: the wsgi version to use.  defaults to (1, 0)
    wsgi_version = (1, 0)

    #: the default request class for :meth:`get_request`
    request_class = BaseRequest

    # Import json locally and delete the name again so that only the
    # bound staticmethod remains a class attribute, not the module.
    import json

    #: The serialization function used when ``json`` is passed.
    json_dumps = staticmethod(json.dumps)
    del json

    def __init__(
        self,
        path="/",
        base_url=None,
        query_string=None,
        method="GET",
        input_stream=None,
        content_type=None,
        content_length=None,
        errors_stream=None,
        multithread=False,
        multiprocess=False,
        run_once=False,
        headers=None,
        data=None,
        environ_base=None,
        environ_overrides=None,
        charset="utf-8",
        mimetype=None,
        json=None,
    ):
        # ``path_s`` converts "?" to the same string type (bytes/text)
        # as ``path`` so the containment checks below don't mix types.
        path_s = make_literal_wrapper(path)
        if query_string is not None and path_s("?") in path:
            raise ValueError("Query string is defined in the path and as an argument")
        if query_string is None and path_s("?") in path:
            # Split an inline query string off the path.
            path, query_string = path.split(path_s("?"), 1)
        self.charset = charset
        self.path = iri_to_uri(path)
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        self.base_url = base_url
        if isinstance(query_string, (bytes, text_type)):
            # A raw string goes through the query_string property ...
            self.query_string = query_string
        else:
            # ... while mappings are normalized to a MultiDict in args.
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False

        if json is not None:
            # ``json`` is just a convenience wrapper around ``data``;
            # the two are mutually exclusive.
            if data is not None:
                raise TypeError("can't provide both json and data")

            data = self.json_dumps(json)

            if self.content_type is None:
                self.content_type = "application/json"

        if data:
            if input_stream is not None:
                raise TypeError("can't provide input stream and data")

            if hasattr(data, "read"):
                # File-like data is read fully into memory first.
                data = data.read()

            if isinstance(data, text_type):
                data = data.encode(self.charset)

            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)

                if self.content_length is None:
                    self.content_length = len(data)

            else:
                # Mapping data is split into file uploads and plain
                # form fields.
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)

        if mimetype is not None:
            self.mimetype = mimetype

    @classmethod
    def from_environ(cls, environ, **kwargs):
        """Turn an environ dict back into a builder. Any extra kwargs
        override the args extracted from the environ.

        .. versionadded:: 0.15
        """
        headers = Headers(EnvironHeaders(environ))
        out = {
            "path": environ["PATH_INFO"],
            "base_url": cls._make_base_url(
                environ["wsgi.url_scheme"], headers.pop("Host"), environ["SCRIPT_NAME"]
            ),
            "query_string": environ["QUERY_STRING"],
            "method": environ["REQUEST_METHOD"],
            "input_stream": environ["wsgi.input"],
            # Content type/length are popped so they aren't duplicated
            # in the remaining header list.
            "content_type": headers.pop("Content-Type", None),
            "content_length": headers.pop("Content-Length", None),
            "errors_stream": environ["wsgi.errors"],
            "multithread": environ["wsgi.multithread"],
            "multiprocess": environ["wsgi.multiprocess"],
            "run_once": environ["wsgi.run_once"],
            "headers": headers,
        }
        out.update(kwargs)
        return cls(**out)

    def _add_file_from_data(self, key, value):
        """Called in the EnvironBuilder to add files from the data dict."""
        if isinstance(value, tuple):
            # Tuple items become positional args for add_file.
            self.files.add_file(key, *value)
        else:
            self.files.add_file(key, value)

    @staticmethod
    def _make_base_url(scheme, host, script_root):
        # Normalize so the result always ends with exactly one slash.
        return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/"

    @property
    def base_url(self):
        """The base URL is used to extract the URL scheme, host name,
        port, and root path.
        """
        return self._make_base_url(self.url_scheme, self.host, self.script_root)

    @base_url.setter
    def base_url(self, value):
        if value is None:
            scheme = "http"
            netloc = "localhost"
            script_root = ""
        else:
            scheme, netloc, script_root, qs, anchor = url_parse(value)
            if qs or anchor:
                raise ValueError("base url must not contain a query string or fragment")
        self.script_root = script_root.rstrip("/")
        self.host = netloc
        self.url_scheme = scheme

    @property
    def content_type(self):
        """The content type for the request.  Reflected from and to
        the :attr:`headers`.  Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.
        """
        ct = self.headers.get("Content-Type")
        if ct is None and not self._input_stream:
            # No explicit header: derive from pending form/file data.
            if self._files:
                return "multipart/form-data"
            if self._form:
                return "application/x-www-form-urlencoded"
            return None
        return ct

    @content_type.setter
    def content_type(self, value):
        if value is None:
            self.headers.pop("Content-Type", None)
        else:
            self.headers["Content-Type"] = value

    @property
    def mimetype(self):
        """The mimetype (content type without charset etc.)

        .. versionadded:: 0.14
        """
        ct = self.content_type
        # Strip parameters such as "; charset=utf-8".
        return ct.split(";")[0].strip() if ct else None

    @mimetype.setter
    def mimetype(self, value):
        self.content_type = get_content_type(value, self.charset)

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.14
        """

        def on_update(d):
            # Mutating the returned dict writes straight back into the
            # Content-Type header.
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)

    @property
    def content_length(self):
        """The content length as integer.  Reflected from and to the
        :attr:`headers`.  Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.
        """
        return self.headers.get("Content-Length", type=int)

    @content_length.setter
    def content_length(self, value):
        if value is None:
            self.headers.pop("Content-Length", None)
        else:
            self.headers["Content-Length"] = str(value)

    def _get_form(self, name, storage):
        """Common behavior for getting the :attr:`form` and
        :attr:`files` properties.

        :param name: Name of the internal cached attribute.
        :param storage: Storage class used for the data.
        """
        if self.input_stream is not None:
            # form/files and a raw input stream are mutually exclusive.
            raise AttributeError("an input stream is defined")

        rv = getattr(self, name)

        if rv is None:
            # Lazily create and cache the storage instance.
            rv = storage()
            setattr(self, name, rv)

        return rv

    def _set_form(self, name, value):
        """Common behavior for setting the :attr:`form` and
        :attr:`files` properties.

        :param name: Name of the internal cached attribute.
        :param value: Value to assign to the attribute.
        """
        # Setting form data invalidates any raw input stream.
        self._input_stream = None
        setattr(self, name, value)

    @property
    def form(self):
        """A :class:`MultiDict` of form values."""
        return self._get_form("_form", MultiDict)

    @form.setter
    def form(self, value):
        self._set_form("_form", value)

    @property
    def files(self):
        """A :class:`FileMultiDict` of uploaded files.  Use
        :meth:`~FileMultiDict.add_file` to add new files.
        """
        return self._get_form("_files", FileMultiDict)

    @files.setter
    def files(self, value):
        self._set_form("_files", value)

    @property
    def input_stream(self):
        """An optional input stream.  If you set this it will clear
        :attr:`form` and :attr:`files`.
        """
        return self._input_stream

    @input_stream.setter
    def input_stream(self, value):
        self._input_stream = value
        # A raw stream replaces any pending form/file data.
        self._form = None
        self._files = None

    @property
    def query_string(self):
        """The query string.  If you set this to a string
        :attr:`args` will no longer be available.
        """
        if self._query_string is None:
            if self._args is not None:
                # Encode the args MultiDict on demand.
                return url_encode(self._args, charset=self.charset)
            return ""
        return self._query_string

    @query_string.setter
    def query_string(self, value):
        self._query_string = value
        self._args = None

    @property
    def args(self):
        """The URL arguments as :class:`MultiDict`."""
        if self._query_string is not None:
            raise AttributeError("a query string is defined")
        if self._args is None:
            self._args = MultiDict()
        return self._args

    @args.setter
    def args(self, value):
        self._query_string = None
        self._args = value

    @property
    def server_name(self):
        """The server name (read-only, use :attr:`host` to set)"""
        return self.host.split(":", 1)[0]

    @property
    def server_port(self):
        """The server port as integer (read-only, use :attr:`host` to set)"""
        pieces = self.host.split(":", 1)
        if len(pieces) == 2 and pieces[1].isdigit():
            return int(pieces[1])
        # No explicit port: fall back to the scheme default.
        if self.url_scheme == "https":
            return 443
        return 80

    def __del__(self):
        try:
            self.close()
        except Exception:
            # Never raise during interpreter shutdown / GC.
            pass

    def close(self):
        """Closes all files.  If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = itervalues(self.files)
        except AttributeError:
            # files raises AttributeError when an input stream is set.
            files = ()
        for f in files:
            try:
                f.close()
            except Exception:
                # Best effort: keep closing the remaining files.
                pass
        self.closed = True

    def get_environ(self):
        """Return the built environ.

        .. versionchanged:: 0.15
            The content type and length headers are set based on
            input stream detection. Previously this only set the WSGI
            keys.
        """
        input_stream = self.input_stream
        content_length = self.content_length

        mimetype = self.mimetype
        content_type = self.content_type

        if input_stream is not None:
            # Measure the remaining stream length without consuming it:
            # seek to the end, then restore the original position.
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif mimetype == "multipart/form-data":
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = stream_encode_multipart(
                values, charset=self.charset
            )
            content_type = mimetype + '; boundary="%s"' % boundary
        elif mimetype == "application/x-www-form-urlencoded":
            # XXX: py2v3 review
            values = url_encode(self.form, charset=self.charset)
            values = values.encode("ascii")
            content_length = len(values)
            input_stream = BytesIO(values)
        else:
            input_stream = BytesIO()

        result = {}
        if self.environ_base:
            result.update(self.environ_base)

        def _path_encode(x):
            # Percent-decode, then apply the WSGI encoding dance so the
            # value matches what a real server would put in the environ.
            return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)

        qs = wsgi_encoding_dance(self.query_string)

        result.update(
            {
                "REQUEST_METHOD": self.method,
                "SCRIPT_NAME": _path_encode(self.script_root),
                "PATH_INFO": _path_encode(self.path),
                "QUERY_STRING": qs,
                # Non-standard, added by mod_wsgi, uWSGI
                "REQUEST_URI": wsgi_encoding_dance(self.path),
                # Non-standard, added by gunicorn
                "RAW_URI": wsgi_encoding_dance(self.path),
                "SERVER_NAME": self.server_name,
                "SERVER_PORT": str(self.server_port),
                "HTTP_HOST": self.host,
                "SERVER_PROTOCOL": self.server_protocol,
                "wsgi.version": self.wsgi_version,
                "wsgi.url_scheme": self.url_scheme,
                "wsgi.input": input_stream,
                "wsgi.errors": self.errors_stream,
                "wsgi.multithread": self.multithread,
                "wsgi.multiprocess": self.multiprocess,
                "wsgi.run_once": self.run_once,
            }
        )

        headers = self.headers.copy()

        # Mirror content type/length into both the CGI keys and the
        # header list so HTTP_* generation below stays consistent.
        if content_type is not None:
            result["CONTENT_TYPE"] = content_type
            headers.set("Content-Type", content_type)

        if content_length is not None:
            result["CONTENT_LENGTH"] = str(content_length)
            headers.set("Content-Length", content_length)

        for key, value in headers.to_wsgi_list():
            result["HTTP_%s" % key.upper().replace("-", "_")] = value

        # Overrides are applied last so they win over everything else.
        if self.environ_overrides:
            result.update(self.environ_overrides)

        return result

    def get_request(self, cls=None):
        """Returns a request with the data.  If the request class is not
        specified :attr:`request_class` is used.

        :param cls: The request wrapper to use.
        """
        if cls is None:
            cls = self.request_class

        return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised by the test :class:`Client` when ``follow_redirects=True``
    and a redirect loop is detected.
    """
class Client(object):
    """This class allows you to send requests to a wrapped application.

    The response wrapper can be a class or factory function that takes
    three arguments: app_iter, status and headers.  The default response
    wrapper just returns a tuple.

    Example::

        class ClientResponse(BaseResponse):
            ...

        client = Client(MyApplication(), response_wrapper=ClientResponse)

    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests. This is True by default, but passing False
    will disable this behaviour.

    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True` as if not no external redirects
    are allowed.

    .. versionadded:: 0.5
       `use_cookies` is new in this version.  Older versions did not provide
       builtin cookie support.

    .. versionadded:: 0.14
       The `mimetype` parameter was added.

    .. versionadded:: 0.15
        The ``json`` parameter.
    """

    def __init__(
        self,
        application,
        response_wrapper=None,
        use_cookies=True,
        allow_subdomain_redirects=False,
    ):
        self.application = application
        self.response_wrapper = response_wrapper
        if use_cookies:
            # Cookies set by responses are stored here and replayed on
            # later requests made through this client.
            self.cookie_jar = _TestCookieJar()
        else:
            self.cookie_jar = None
        self.allow_subdomain_redirects = allow_subdomain_redirects

    def set_cookie(
        self,
        server_name,
        key,
        value="",
        max_age=None,
        expires=None,
        path="/",
        domain=None,
        secure=None,
        httponly=False,
        samesite=None,
        charset="utf-8",
    ):
        """Sets a cookie in the client's cookie jar.  The server name
        is required and has to match the one that is also passed to
        the open call.
        """
        assert self.cookie_jar is not None, "cookies disabled"
        header = dump_cookie(
            key,
            value,
            max_age,
            expires,
            path,
            domain,
            secure,
            httponly,
            charset,
            samesite=samesite,
        )
        # Build a fake request/response pair so the jar parses the
        # Set-Cookie header exactly as it would a real response.
        environ = create_environ(path, base_url="http://" + server_name)
        headers = [("Set-Cookie", header)]
        self.cookie_jar.extract_wsgi(environ, headers)

    def delete_cookie(self, server_name, key, path="/", domain=None):
        """Deletes a cookie in the test client."""
        # Deletion is expressed as setting an already-expired cookie.
        self.set_cookie(
            server_name, key, expires=0, max_age=0, path=path, domain=domain
        )

    def run_wsgi_app(self, environ, buffered=False):
        """Runs the wrapped WSGI app with the given environment."""
        if self.cookie_jar is not None:
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            # rv[2] is the response header list; pick up any Set-Cookie.
            self.cookie_jar.extract_wsgi(environ, rv[2])
        return rv

    def resolve_redirect(self, response, new_location, environ, buffered=False):
        """Perform a new request to the location given by the redirect
        response to the previous request.
        """
        scheme, netloc, path, qs, anchor = url_parse(new_location)
        builder = EnvironBuilder.from_environ(environ, query_string=qs)

        to_name_parts = netloc.split(":", 1)[0].split(".")
        from_name_parts = builder.server_name.split(".")

        if to_name_parts != [""]:
            # The new location has a host, use it for the base URL.
            builder.url_scheme = scheme
            builder.host = netloc
        else:
            # A local redirect with autocorrect_location_header=False
            # doesn't have a host, so use the request's host.
            to_name_parts = from_name_parts

        # Explain why a redirect to a different server name won't be followed.
        if to_name_parts != from_name_parts:
            if to_name_parts[-len(from_name_parts) :] == from_name_parts:
                if not self.allow_subdomain_redirects:
                    raise RuntimeError("Following subdomain redirects is not enabled.")
            else:
                raise RuntimeError("Following external redirects is not supported.")

        path_parts = path.split("/")
        root_parts = builder.script_root.split("/")

        if path_parts[: len(root_parts)] == root_parts:
            # Strip the script root from the path.
            builder.path = path[len(builder.script_root) :]
        else:
            # The new location is not under the script root, so use the
            # whole path and clear the previous root.
            builder.path = path
            builder.script_root = ""

        status_code = int(response[1].split(None, 1)[0])

        # Only 307 and 308 preserve all of the original request.
        if status_code not in {307, 308}:
            # HEAD is preserved, everything else becomes GET.
            if builder.method != "HEAD":
                builder.method = "GET"

            # Clear the body and the headers that describe it.
            builder.input_stream = None
            builder.content_type = None
            builder.content_length = None
            builder.headers.pop("Transfer-Encoding", None)

        # Disable the response wrapper while handling redirects. Not
        # thread safe, but the client should not be shared anyway.
        old_response_wrapper = self.response_wrapper
        self.response_wrapper = None

        try:
            return self.open(builder, as_tuple=True, buffered=buffered)
        finally:
            self.response_wrapper = old_response_wrapper

    def open(self, *args, **kwargs):
        """Takes the same arguments as the :class:`EnvironBuilder` class with
        some additions:  You can provide a :class:`EnvironBuilder` or a WSGI
        environment as only argument instead of the :class:`EnvironBuilder`
        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
        that change the type of the return value or the way the application is
        executed.

        .. versionchanged:: 0.5
           If a dict is provided as file in the dict for the `data` parameter
           the content type has to be called `content_type` now instead of
           `mimetype`.  This change was made for consistency with
           :class:`werkzeug.FileWrapper`.

            The `follow_redirects` parameter was added to :func:`open`.

        Additional parameters:

        :param as_tuple: Returns a tuple in the form ``(environ, result)``
        :param buffered: Set this to True to buffer the application run.
                         This will automatically close the application for
                         you as well.
        :param follow_redirects: Set this to True if the `Client` should
                                 follow HTTP redirects.
        """
        as_tuple = kwargs.pop("as_tuple", False)
        buffered = kwargs.pop("buffered", False)
        follow_redirects = kwargs.pop("follow_redirects", False)

        environ = None
        if not kwargs and len(args) == 1:
            # A prepared builder or a ready-made environ dict may be
            # passed directly instead of builder arguments.
            if isinstance(args[0], EnvironBuilder):
                environ = args[0].get_environ()
            elif isinstance(args[0], dict):
                environ = args[0]

        if environ is None:
            builder = EnvironBuilder(*args, **kwargs)

            try:
                environ = builder.get_environ()
            finally:
                builder.close()

        response = self.run_wsgi_app(environ.copy(), buffered=buffered)

        # handle redirects
        redirect_chain = []
        while 1:
            status_code = int(response[1].split(None, 1)[0])
            if (
                status_code not in {301, 302, 303, 305, 307, 308}
                or not follow_redirects
            ):
                break

            # Exhaust intermediate response bodies to ensure middleware
            # that returns an iterator runs any cleanup code.
            if not buffered:
                for _ in response[0]:
                    pass

            new_location = response[2]["location"]
            new_redirect_entry = (new_location, status_code)
            if new_redirect_entry in redirect_chain:
                raise ClientRedirectError("loop detected")

            redirect_chain.append(new_redirect_entry)
            environ, response = self.resolve_redirect(
                response, new_location, environ, buffered=buffered
            )

        if self.response_wrapper is not None:
            response = self.response_wrapper(*response)

        if as_tuple:
            return environ, response

        return response

    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw["method"] = "GET"
        return self.open(*args, **kw)

    def patch(self, *args, **kw):
        """Like open but method is enforced to PATCH."""
        kw["method"] = "PATCH"
        return self.open(*args, **kw)

    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw["method"] = "POST"
        return self.open(*args, **kw)

    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw["method"] = "HEAD"
        return self.open(*args, **kw)

    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw["method"] = "PUT"
        return self.open(*args, **kw)

    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw["method"] = "DELETE"
        return self.open(*args, **kw)

    def options(self, *args, **kw):
        """Like open but method is enforced to OPTIONS."""
        kw["method"] = "OPTIONS"
        return self.open(*args, **kw)

    def trace(self, *args, **kw):
        """Like open but method is enforced to TRACE."""
        kw["method"] = "TRACE"
        return self.open(*args, **kw)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.application)
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.  The first
    parameter should be the path of the request which defaults to '/'.  The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        environ = builder.get_environ()
    finally:
        # Always release any file objects the builder may hold.
        builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output.  This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function.  This tries to resolve such edge
    cases automatically.  But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined.  Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # Filled by start_response: [status, headers] once the app responds.
    status_headers = []
    # Collects chunks passed to the legacy write() callable and any body
    # chunks consumed before start_response was called.
    chunks = []

    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            # Per PEP 3333, re-raise the original exception.
            reraise(*exc_info)
        status_headers[:] = [status, headers]
        # The returned write() callable just buffers its argument.
        return chunks.append

    app_rv = app(environ, start_response)
    close_func = getattr(app_rv, "close", None)
    app_iter = iter(app_rv)

    if buffered:
        # Materialize the full body and close the app immediately.
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    else:
        # Pull items until the app has called start_response, then chain
        # anything consumed so far in front of the remaining iterator and
        # restore the close() callable via ClosingIterator if needed.
        for piece in app_iter:
            chunks.append(piece)
            if status_headers:
                break
        if chunks:
            app_iter = chain(chunks, app_iter)
        if close_func is not None and app_iter is not app_rv:
            app_iter = ClosingIterator(app_iter, close_func)

    return app_iter, status_headers[0], Headers(status_headers[1])
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/formparser.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
from functools import update_wrapper
from itertools import chain
from itertools import repeat
from itertools import tee
from . import exceptions
from ._compat import BytesIO
from ._compat import text_type
from ._compat import to_native
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .urls import url_decode_stream
from .wsgi import get_content_length
from .wsgi import get_input_stream
from .wsgi import make_line_iter
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
    from tempfile import SpooledTemporaryFile
except ImportError:
    from tempfile import TemporaryFile

    # Sentinel checked by ``default_stream_factory`` below to pick the
    # fallback stream types instead.
    SpooledTemporaryFile = None

#: an iterator that yields empty strings; chained after the real line
#: iterator in ``MultiPartParser.parse_lines`` so truncated input shows
#: up as an empty line instead of a StopIteration
_empty_string_iter = repeat("")

#: a regular expression for multipart boundaries: printable ASCII only,
#: with a final character that is not a space
_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")

#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
def default_stream_factory(
    total_content_length, filename, content_type, content_length=None
):
    """Return the default stream used to buffer incoming form data.

    Prefers :class:`~tempfile.SpooledTemporaryFile`, which keeps small
    payloads in memory and rolls over to disk automatically.  On
    platforms without it, small known-size payloads go to a
    :class:`~io.BytesIO` and everything else to a temporary file.
    """
    # 500 KB threshold below which data may stay in memory.
    memory_limit = 1024 * 500
    if SpooledTemporaryFile is not None:
        return SpooledTemporaryFile(max_size=memory_limit, mode="wb+")
    fits_in_memory = (
        total_content_length is not None and total_content_length <= memory_limit
    )
    if fits_in_memory:
        return BytesIO()
    return TemporaryFile("wb+")
def parse_form_data(
    environ,
    stream_factory=None,
    charset="utf-8",
    errors="replace",
    max_form_memory_size=None,
    max_content_length=None,
    cls=None,
    silent=True,
):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``.  Only call this for ``POST``, ``PUT`` or
    ``PATCH`` requests.

    For ``multipart/form-data`` bodies the files multidict is filled with
    `FileStorage` objects.  For unknown mimetypes the input stream is
    wrapped and returned as first argument, otherwise the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.
    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes accepted for
                                 in-memory stored form data; exceeding it
                                 raises :exc:`~exceptions.RequestEntityTooLarge`.
    :param max_content_length: if provided, transmitted data longer than
                               this raises
                               :exc:`~exceptions.RequestEntityTooLarge`.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(
        stream_factory,
        charset,
        errors,
        max_form_memory_size,
        max_content_length,
        cls,
        silent,
    )
    return parser.parse_from_environ(environ)
def exhaust_stream(f):
    """Decorator that drains any remaining data on ``stream`` once the
    wrapped method returns (or raises)."""

    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            exhaust = getattr(stream, "exhaust", None)
            if exhaust is None:
                # No fast-path available: read and discard 64 KB chunks
                # until the stream reports end of input.
                chunk = stream.read(1024 * 64)
                while chunk:
                    chunk = stream.read(1024 * 64)
            else:
                exhaust()

    return update_wrapper(wrapper, f)
class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug.  By itself
    it can parse multipart and url encoded form data.  It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                                 in-memory stored form data.  If the data
                                 exceeds the value specified an
                                 :exc:`~exceptions.RequestEntityTooLarge`
                                 exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(
        self,
        stream_factory=None,
        charset="utf-8",
        errors="replace",
        max_form_memory_size=None,
        max_content_length=None,
        cls=None,
        silent=True,
    ):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        # Returns the *unbound* parse function registered for ``mimetype``
        # (see ``parse_functions`` at the class bottom) or ``None`` when
        # the mimetype is not handled.  ``options`` is unused here but
        # available for subclasses that dispatch on mimetype parameters.
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        # Reject overlong requests up front, before any parsing work.
        if (
            self.max_content_length is not None
            and content_length is not None
            and content_length > self.max_content_length
        ):
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                # ``parse_func`` is unbound, hence the explicit ``self``.
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                if not self.silent:
                    raise

        # Unknown mimetype, or a parse error swallowed by ``silent``:
        # return the untouched stream and empty form/file containers.
        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        # Delegates to MultiPartParser; the boundary comes from the
        # Content-Type parameters and must be ASCII-encodable bytes.
        parser = MultiPartParser(
            self.stream_factory,
            self.charset,
            self.errors,
            max_form_memory_size=self.max_form_memory_size,
            cls=self.cls,
        )
        boundary = options.get("boundary")
        if boundary is None:
            raise ValueError("Missing boundary")
        if isinstance(boundary, text_type):
            boundary = boundary.encode("ascii")
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        # url encoded data is decoded entirely in memory, so the in-memory
        # size limit is enforced before reading the stream at all.
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions.  The values are the
    #: *unbound* functions defined above; ``parse`` passes ``self``
    #: explicitly when calling them.
    parse_functions = {
        "multipart/form-data": _parse_multipart,
        "application/x-www-form-urlencoded": _parse_urlencoded,
        "application/x-url-encoded": _parse_urlencoded,
    }
def is_valid_multipart_boundary(boundary):
    """Return ``True`` if the given string is acceptable as a multipart
    boundary (printable ASCII, not ending in whitespace)."""
    return bool(_multipart_boundary_re.match(boundary))
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ["\r\n", b"\r\n"]:
return line[:-2], True
elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol).  The iterable has to be newline terminated.
    The iterable stops at the blank line that ends the header section, so it
    can be consumed further by the caller.

    :param iterable: iterable of strings that are newline terminated
    """
    collected = []
    for raw_line in iterable:
        stripped, terminated = _line_parse(to_native(raw_line))
        if not terminated:
            raise ValueError("unexpected end of line in multipart header")
        if not stripped:
            # Blank line terminates the header block.
            break
        if stripped[0] in " \t" and collected:
            # Continuation line: fold it into the previous header value.
            prev_key, prev_value = collected[-1]
            collected[-1] = (prev_key, prev_value + "\n " + stripped[1:])
        else:
            pieces = stripped.split(":", 1)
            if len(pieces) == 2:
                collected.append((pieces[0].strip(), pieces[1].strip()))
    # The list is handed to Headers without copying; it was never shared.
    return Headers(collected)
#: event tokens yielded by :meth:`MultiPartParser.parse_lines` and
#: consumed by :meth:`MultiPartParser.parse_parts`
_begin_form = "begin_form"
_begin_file = "begin_file"
_cont = "cont"
_end = "end"
class MultiPartParser(object):
    """Low level parser for ``multipart/form-data`` bodies.  Splits a
    boundary-delimited input stream into form fields and uploaded files.
    """

    def __init__(
        self,
        stream_factory=None,
        charset="utf-8",
        errors="replace",
        max_form_memory_size=None,
        cls=None,
        buffer_size=64 * 1024,
    ):
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.stream_factory = (
            default_stream_factory if stream_factory is None else stream_factory
        )
        self.cls = MultiDict if cls is None else cls

        # make sure the buffer size is divisible by four so that we can base64
        # decode chunk by chunk
        assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
        # also the buffer size has to be at least 1024 bytes long or long headers
        # will freak out the system
        assert buffer_size >= 1024, "buffer size has to be at least 1KB"

        self.buffer_size = buffer_size

    def _fix_ie_filename(self, filename):
        """Internet Explorer 6 transmits the full file name if a file is
        uploaded.  This function strips the full path if it thinks the
        filename is Windows-like absolute.
        """
        if filename[1:3] == ":\\" or filename[:2] == "\\\\":
            return filename.split("\\")[-1]
        return filename

    def _find_terminator(self, iterator):
        """The terminator might have some additional newlines before it.
        There is at least one application that sends additional newlines
        before headers (the python setuptools package).
        """
        for line in iterator:
            if not line:
                # The padded iterator yields empty strings once the real
                # input is exhausted.
                break
            line = line.strip()
            if line:
                return line
        return b""

    def fail(self, message):
        # Central failure hook; subclasses may override error handling.
        raise ValueError(message)

    def get_part_encoding(self, headers):
        # Returns the part's transfer encoding if it is one we know how
        # to decode (base64 / quoted-printable), otherwise None.
        transfer_encoding = headers.get("content-transfer-encoding")
        if (
            transfer_encoding is not None
            and transfer_encoding in _supported_multipart_encodings
        ):
            return transfer_encoding

    def get_part_charset(self, headers):
        # Figure out input charset for current part
        content_type = headers.get("content-type")
        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get("charset", self.charset)
        return self.charset

    def start_file_streaming(self, filename, headers, total_content_length):
        # Normalizes the filename and asks the stream factory for a
        # writable container for the uploaded file's data.
        if isinstance(filename, bytes):
            filename = filename.decode(self.charset, self.errors)
        filename = self._fix_ie_filename(filename)
        content_type = headers.get("content-type")
        try:
            content_length = int(headers["content-length"])
        except (KeyError, ValueError):
            # Missing or malformed per-part Content-Length: report 0.
            content_length = 0
        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=filename,
            content_type=content_type,
            content_length=content_length,
        )
        return filename, container

    def in_memory_threshold_reached(self, bytes):
        # Hook invoked when in-memory form data exceeds the limit.
        raise exceptions.RequestEntityTooLarge()

    def validate_boundary(self, boundary):
        if not boundary:
            self.fail("Missing boundary")
        if not is_valid_multipart_boundary(boundary):
            self.fail("Invalid boundary: %s" % boundary)
        if len(boundary) > self.buffer_size:  # pragma: no cover
            # this should never happen because we check for a minimum size
            # of 1024 and boundaries may not be longer than 200.  The only
            # situation when this happens is for non debug builds where
            # the assert is skipped.
            self.fail("Boundary longer than buffer size")

    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b"--" + boundary
        last_part = next_part + b"--"

        # Pad the line iterator with empty strings so a truncated body
        # surfaces as an empty line instead of StopIteration.
        iterator = chain(
            make_line_iter(
                file,
                limit=content_length,
                buffer_size=self.buffer_size,
                cap_at_buffer=cap_at_buffer,
            ),
            _empty_string_iter,
        )

        terminator = self._find_terminator(iterator)

        if terminator == last_part:
            # Body consists solely of the closing boundary: no parts.
            return
        elif terminator != next_part:
            self.fail("Expected boundary at start of multipart data")

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get("content-disposition")
            if disposition is None:
                self.fail("Missing Content-Disposition header")
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get("name")
            filename = extra.get("filename")

            # if no content type is given we stream into memory.  A list is
            # used as a temporary container.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b""
            for line in iterator:
                if not line:
                    self.fail("unexpected end of stream")

                # A line starting with ``--`` may be the next/closing
                # boundary; check before any decoding.
                if line[:2] == b"--":
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    if transfer_encoding == "base64":
                        transfer_encoding = "base64_codec"
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail("could not decode transfer encoded chunk")

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b""

                # If the line ends with windows CRLF we write everything except
                # the last two bytes.  In all other cases however we write
                # everything except the last byte.  If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration.  this ensures we do not write the
                # final newline into the stream.  That way we do not have to
                # truncate the stream.  However we do have to make sure that
                # if something else than a newline is in there we write it
                # out.
                if line[-2:] == b"\r\n":
                    buf = b"\r\n"
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1

                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError("unexpected end of part")

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop of
            # certain values.
            if buf not in (b"", b"\r", b"\n", b"\r\n"):
                yield _cont, buf

            yield _end, None

    def parse_parts(self, file, boundary, content_length):
        """Generate ``('file', (name, val))`` and
        ``('form', (name, val))`` parts.
        """
        in_memory = 0

        for ellt, ell in self.parse_lines(file, boundary, content_length):
            if ellt == _begin_file:
                headers, name, filename = ell
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length
                )
                _write = container.write

            elif ellt == _begin_form:
                headers, name = ell
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            elif ellt == _cont:
                _write(ell)

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(ell)

                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)

            elif ellt == _end:
                if is_file:
                    # Rewind so the consumer can read from the start.
                    container.seek(0)
                    yield (
                        "file",
                        (name, FileStorage(container, filename, name, headers=headers)),
                    )
                else:
                    part_charset = self.get_part_charset(headers)
                    yield (
                        "form",
                        (name, b"".join(container).decode(part_charset, self.errors)),
                    )

    def parse(self, file, boundary, content_length):
        # Duplicate the event stream so form fields and files can be
        # collected by two independent generator filters.
        formstream, filestream = tee(
            self.parse_parts(file, boundary, content_length), 2
        )
        form = (p[1] for p in formstream if p[0] == "form")
        files = (p[1] for p in filestream if p[0] == "file")
        return self.cls(form), self.cls(files)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/posixemulation.py
|
# -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. Eg: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import errno
import os
import random
import sys
import time
from ._compat import to_unicode
from .filesystem import get_filesystem_encoding
# Whether ``rename`` can replace a destination file that is still open in
# another process.  True on POSIX; on NT only when the transactional
# kernel APIs could be loaded below.
can_rename_open_file = False

if os.name == "nt":
    try:
        import ctypes

        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW

        def _rename(src, dst):
            # Try the transacted move first; otherwise retry MoveFileEx a
            # bounded number of times, since it can fail transiently
            # (e.g. while another process briefly holds the file).
            src = to_unicode(src, get_filesystem_encoding())
            dst = to_unicode(dst, get_filesystem_encoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(
                    src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH
                )
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv

        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True

        def _rename_atomic(src, dst):
            # Perform the move inside a kernel transaction so that the
            # replacement is all-or-nothing.  Returns False when the
            # transaction cannot even be created.
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename")
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(
                        src,
                        dst,
                        None,
                        None,
                        _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH,
                        ta,
                    )
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                # Always release the transaction handle.
                _CloseHandle(ta)

    except Exception:
        # ctypes or the required kernel APIs are unavailable: both
        # helpers report failure so ``rename`` uses the pure fallback.
        def _rename(src, dst):
            return False

        def _rename_atomic(src, dst):
            return False

    def rename(src, dst):
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            # Move the existing destination aside under a random suffix,
            # put the new file in place, then best-effort delete the
            # displaced copy.
            old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass

else:
    # POSIX ``os.rename`` already replaces existing targets atomically.
    rename = os.rename
    can_rename_open_file = True
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/utils.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import pkgutil
import re
import sys
from ._compat import iteritems
from ._compat import PY2
from ._compat import reraise
from ._compat import string_types
from ._compat import text_type
from ._compat import unichr
from ._internal import _DictAccessorProperty
from ._internal import _missing
from ._internal import _parse_signature
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
#: matches ``$name`` and ``${name}`` placeholders for :func:`format_string`
_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
#: matches a single SGML/HTML entity reference (without ``&`` and ``;``)
_entity_re = re.compile(r"&([^;]+);")
#: characters removed from filenames by :func:`secure_filename`
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
#: reserved device file names on Windows; :func:`secure_filename`
#: prepends an underscore to avoid producing them
_windows_device_files = (
    "CON",
    "AUX",
    "COM1",
    "COM2",
    "COM3",
    "COM4",
    "LPT1",
    "LPT2",
    "LPT3",
    "PRN",
    "NUL",
)
class cached_property(property):
    """A decorator that converts a function into a lazy property.  The
    wrapped function is called once on first access and the computed
    result is then stored in the instance ``__dict__``; later accesses
    return the stored value directly::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # Subclassing the builtin ``property`` keeps introspection tools
    # happy; ``__get__`` is overridden so manual invocation also goes
    # through the cache.

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __set__(self, obj, value):
        # Explicit assignment simply seeds the cache.
        obj.__dict__[self.__name__] = value

    def __get__(self, obj, type=None):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        cached = obj.__dict__.get(self.__name__, _missing)
        if cached is not _missing:
            return cached
        computed = self.func(obj)
        obj.__dict__[self.__name__] = computed
        return computed
def invalidate_cached_property(obj, name):
    """Invalidates the cache for a :class:`cached_property`:

    >>> class Test(object):
    ...     @cached_property
    ...     def magic_number(self):
    ...         print("recalculating...")
    ...         return 42
    ...
    >>> var = Test()
    >>> var.magic_number
    recalculating...
    42
    >>> var.magic_number
    42
    >>> invalidate_cached_property(var, "magic_number")
    >>> var.magic_number
    recalculating...
    42

    You must pass the name of the cached property as the second argument.
    """
    descriptor = getattr(obj.__class__, name, None)
    if not isinstance(descriptor, cached_property):
        raise TypeError(
            "Attribute {} of object {} is not a cached_property, "
            "cannot be invalidated".format(name, obj)
        )
    # Storing the sentinel makes ``cached_property.__get__`` recompute on
    # the next access.
    obj.__dict__[name] = _missing
class environ_property(_DictAccessorProperty):
    """Maps request attributes to environment variables.  This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used.  If no default value is provided `None` is used.

    Per default the property is read only.  You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """

    # Writing through the property is disabled unless the constructor is
    # called with ``read_only=False``.
    read_only = True

    def lookup(self, obj):
        # The backing storage for this property is the environ dict.
        return obj.environ
class header_property(_DictAccessorProperty):
    """Like `environ_property` but for headers."""

    def lookup(self, obj):
        # The backing storage for this property is the headers object.
        return obj.headers
class HTMLBuilder(object):
    """Helper object for HTML generation.

    Per default there are two instances of that class.  The `html` one, and
    the `xhtml` one for those two dialects.  The class uses keyword parameters
    and positional parameters to generate small snippets of HTML.

    Keyword parameters are converted to XML/SGML attributes, positional
    arguments are used as children.  Because Python accepts positional
    arguments before keyword arguments it's a good idea to use a list with the
    star-syntax for some children:

    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
    ...                        html.a('bar', href='bar.html')])
    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'

    This class works around some browser limitations and can not be used for
    arbitrary SGML/XML generation.  For that purpose lxml and similar
    libraries exist.

    Calling the builder escapes the string passed:

    >>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
    """

    #: pattern and table used to resolve entity references (also used by
    #: the module-level :func:`unescape`)
    _entity_re = re.compile(r"&([^;]+);")
    _entities = name2codepoint.copy()
    _entities["apos"] = 39
    #: void elements that may not have children and self-close in XHTML
    _empty_elements = {
        "area",
        "base",
        "basefont",
        "br",
        "col",
        "command",
        "embed",
        "frame",
        "hr",
        "img",
        "input",
        "keygen",
        "isindex",
        "link",
        "meta",
        "param",
        "source",
        "wbr",
    }
    #: attributes rendered as bare flags in HTML (value-less)
    _boolean_attributes = {
        "selected",
        "checked",
        "compact",
        "declare",
        "defer",
        "disabled",
        "ismap",
        "multiple",
        "nohref",
        "noresize",
        "noshade",
        "nowrap",
    }
    #: elements whose text children are always escaped
    _plaintext_elements = {"textarea"}
    #: elements whose content gets a CDATA wrapper in XHTML
    _c_like_cdata = {"script", "style"}

    def __init__(self, dialect):
        # ``dialect`` is either "html" or "xhtml".
        self._dialect = dialect

    def __call__(self, s):
        # Calling the builder escapes the given string.
        return escape(s)

    def __getattr__(self, tag):
        if tag[:2] == "__":
            # Never treat dunder lookups as tag names.
            raise AttributeError(tag)

        def proxy(*children, **arguments):
            buffer = "<" + tag
            for key, value in iteritems(arguments):
                if value is None:
                    # ``None`` suppresses the attribute entirely.
                    continue
                if key[-1] == "_":
                    # Trailing underscore works around keywords (class_).
                    key = key[:-1]
                if key in self._boolean_attributes:
                    if not value:
                        continue
                    if self._dialect == "xhtml":
                        # XHTML requires a value; repeat the name.
                        value = '="' + key + '"'
                    else:
                        value = ""
                else:
                    value = '="' + escape(value) + '"'
                buffer += " " + key + value
            if not children and tag in self._empty_elements:
                if self._dialect == "xhtml":
                    buffer += " />"
                else:
                    buffer += ">"
                return buffer
            buffer += ">"

            children_as_string = "".join(
                [text_type(x) for x in children if x is not None]
            )

            if children_as_string:
                if tag in self._plaintext_elements:
                    children_as_string = escape(children_as_string)
                elif tag in self._c_like_cdata and self._dialect == "xhtml":
                    children_as_string = (
                        "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
                    )
            buffer += children_as_string + "</" + tag + ">"
            return buffer

        return proxy

    def __repr__(self):
        return "<%s for %r>" % (self.__class__.__name__, self._dialect)
#: ready-to-use builder instances for the two supported dialects
html = HTMLBuilder("html")
xhtml = HTMLBuilder("xhtml")

# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA registration.
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}
def get_content_type(mimetype, charset):
    """Returns the full content type string with charset for a mimetype.

    If the mimetype represents text, the charset parameter will be
    appended, otherwise the mimetype is returned unchanged.

    :param mimetype: The mimetype to be used as content type.
    :param charset: The charset to be appended for text mimetypes.
    :return: The content type.

    .. versionchanged:: 0.15
        Any type that ends with ``+xml`` gets a charset, not just those
        that start with ``application/``. Known text types such as
        ``application/javascript`` are also given charsets.
    """
    is_textual = (
        mimetype.startswith("text/")
        or mimetype.endswith("+xml")
        or mimetype in _charset_mimetypes
    )
    if not is_textual:
        return mimetype
    return mimetype + "; charset=" + charset
def detect_utf_encoding(data):
    """Detect which UTF encoding was used to encode the given bytes.

    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
    accepted.  Older documents allowed 8, 16, or 32.  16 and 32 can be big
    or little endian.  Some editors or libraries may prepend a BOM.

    :internal:

    :param data: Bytes in unknown UTF encoding.
    :return: UTF encoding name

    .. versionadded:: 0.15
    """
    prefix = data[:4]

    # An explicit UTF-8 BOM wins outright.
    if prefix.startswith(codecs.BOM_UTF8):
        return "utf-8-sig"

    # UTF-8 never produces NUL bytes here, while the wider encodings do.
    if b"\x00" not in prefix:
        return "utf-8"

    if prefix in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
        return "utf-32"
    if prefix[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
        return "utf-16"

    # No BOM: infer endianness and width from the NUL byte pattern of the
    # first (ASCII) character.
    if len(prefix) == 4:
        if prefix[:3] == b"\x00\x00\x00":
            return "utf-32-be"
        if prefix[::2] == b"\x00\x00":
            return "utf-16-be"
        if prefix[1:] == b"\x00\x00\x00":
            return "utf-32-le"
        if prefix[1::2] == b"\x00\x00":
            return "utf-16-le"
    elif len(prefix) == 2:
        return "utf-16-be" if prefix.startswith(b"\x00") else "utf-16-le"

    return "utf-8"
def format_string(string, context):
    """String-template format a string:

    >>> format_string('$foo and ${foo}s', dict(foo=42))
    '42 and 42s'

    Only plain ``$name`` / ``${name}`` lookups are supported; no attribute
    access or expressions are evaluated.

    :param string: the format string.
    :param context: a dict with the variables to insert.
    """

    def replace_placeholder(match):
        replacement = context[match.group(1) or match.group(2)]
        if isinstance(replacement, string_types):
            return replacement
        # Coerce to the same string type as the template.
        return type(string)(replacement)

    return _format_re.sub(replace_placeholder, string)
def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it.  This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.  The filename returned is an ASCII only string
    for maximum portability.

    On windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename.  It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    if isinstance(filename, text_type):
        from unicodedata import normalize

        # Transliterate to ASCII, dropping anything with no mapping.
        filename = normalize("NFKD", filename).encode("ascii", "ignore")
        if not PY2:
            filename = filename.decode("ascii")

    # Path separators become spaces so they collapse into underscores
    # below, neutralizing directory traversal.
    for separator in (os.path.sep, os.path.altsep):
        if separator:
            filename = filename.replace(separator, " ")

    collapsed = "_".join(filename.split())
    filename = str(_filename_ascii_strip_re.sub("", collapsed)).strip("._")

    # on nt a couple of special files are present in each folder.  We
    # have to ensure that the target file is not such a filename.  In
    # this case we prepend an underline
    is_windows_device = (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    )
    if is_windows_device:
        filename = "_" + filename

    return filename
def escape(s):
    """Replace special characters ``&``, ``<``, ``>`` and ``"`` with
    HTML-safe sequences.  ``None`` escapes to an empty string, and objects
    exposing an ``__html__`` method are trusted to return safe markup.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ""
    elif hasattr(s, "__html__"):
        return text_type(s.__html__())

    if not isinstance(s, string_types):
        s = text_type(s)

    # BUGFIX: the replacement targets had been corrupted by an
    # HTML-unescaping pass (``.replace("&", "&")`` was a no-op and the
    # quote replacement was a syntax error).  Restored the proper
    # character entities; ``&`` must be replaced first so the other
    # substitutions are not double-escaped.
    return (
        s.replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace('"', "&quot;")
    )
def unescape(s):
    """The reverse function of `escape`.  This unescapes all the HTML
    entities, not only the XML entities inserted by `escape`.

    :param s: the string to unescape.
    """

    def resolve_entity(m):
        ref = m.group(1)
        # Named entity such as ``amp`` or ``nbsp``.
        if ref in HTMLBuilder._entities:
            return unichr(HTMLBuilder._entities[ref])
        try:
            # Numeric references: ``&#x..;`` (hex) or ``&#..;`` (decimal).
            if ref[:2] in ("#x", "#X"):
                return unichr(int(ref[2:], 16))
            elif ref.startswith("#"):
                return unichr(int(ref[1:]))
        except ValueError:
            pass
        # Unknown entity: drop it entirely.
        return u""

    return _entity_re.sub(resolve_entity, s)
def redirect(location, code=302, Response=None):
    """Returns a response object (a WSGI application) that, if called,
    redirects the client to the target location.  Supported codes are
    301, 302, 303, 305, 307, and 308.  300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with defined If-Modified-Since headers.

    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.

    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response.  The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        from .wrappers import Response

    # Escape for the HTML body *before* the IRI/URI conversion below so
    # the human-readable text keeps the original characters.
    display_location = escape(location)
    if isinstance(location, text_type):
        # Safe conversion is necessary here as we might redirect
        # to a broken URI scheme (for instance itms-services).
        from .urls import iri_to_uri

        location = iri_to_uri(location, safe_conversion=True)

    body = (
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to target URL: "
        '<a href="%s">%s</a>. If not click the link.'
        % (escape(location), display_location)
    )
    response = Response(body, code, mimetype="text/html")
    response.headers["Location"] = location
    return response
def append_slash_redirect(environ, code=301):
    """Redirects to the same URL but with a slash appended.  The behavior
    of this function is undefined if the path ends with a slash already.

    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    target = "%s/" % environ["PATH_INFO"].strip("/")
    query = environ.get("QUERY_STRING")
    if query:
        target = "%s?%s" % (target, query)
    return redirect(target, code)
def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    # __import__ cannot handle unicode strings in the fromlist if the
    # module is a package, so normalise to a native str first.
    import_name = str(import_name).replace(":", ".")
    try:
        try:
            __import__(import_name)
        except ImportError:
            # A plain (dot-free) module name that fails to import is a
            # real error; a dotted path may still name a module attribute.
            if "." not in import_name:
                raise
        else:
            # The whole dotted path imported cleanly as a module.
            return sys.modules[import_name]

        module_name, obj_name = import_name.rsplit(".", 1)
        module = __import__(module_name, globals(), locals(), [obj_name])
        try:
            return getattr(module, obj_name)
        except AttributeError as exc:
            raise ImportError(exc)
    except ImportError as exc:
        if not silent:
            reraise(
                ImportStringError,
                ImportStringError(import_name, exc),
                sys.exc_info()[2],
            )
def find_modules(import_path, include_packages=False, recursive=False):
    """Finds all the modules below a package.  This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`.  This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    pkg = import_string(import_path)
    search_path = getattr(pkg, "__path__", None)
    if search_path is None:
        raise ValueError("%r is not a package" % import_path)
    prefix = pkg.__name__ + "."
    for _importer, name, is_package in pkgutil.iter_modules(search_path):
        qualified = prefix + name
        if not is_package:
            yield qualified
            continue
        if include_packages:
            yield qualified
        if recursive:
            for child in find_modules(qualified, include_packages, True):
                yield child
def validate_arguments(func, args, kwargs, drop_extra=True):
    """Checks if the function accepts the arguments and keyword arguments.
    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
    the function without causing a `TypeError` because the function signature
    is incompatible.  If `drop_extra` is set to `True` (which is the default)
    any extra positional or keyword arguments are dropped automatically.

    The exception raised provides three attributes:

    `missing`
        A set of argument names that the function expected but where
        missing.

    `extra`
        A dict of keyword arguments that the function can not handle but
        where provided.

    `extra_positional`
        A list of values that where given by positional argument but the
        function cannot accept.

    This can be useful for decorators that forward user submitted data to
    a view function::

        from werkzeug.utils import ArgumentValidationError, validate_arguments

        def sanitize(f):
            def proxy(request):
                data = request.values.to_dict()
                try:
                    args, kwargs = validate_arguments(f, (request,), data)
                except ArgumentValidationError:
                    raise BadRequest('The browser failed to transmit all '
                                     'the data expected.')
                return f(*args, **kwargs)
            return proxy

    :param func: the function the validation is performed against.
    :param args: a tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :param drop_extra: set to `False` if you don't want extra arguments
                       to be silently dropped.
    :return: tuple in the form ``(args, kwargs)``.
    """
    bound = _parse_signature(func)(args, kwargs)
    new_args, new_kwargs, missing, extra, extra_positional = bound[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    if not drop_extra and (extra or extra_positional):
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(new_args), new_kwargs
def bind_arguments(func, args, kwargs):
    """Bind the arguments provided into a dict.  When passed a function,
    a tuple of arguments and a dict of keyword arguments `bind_arguments`
    returns a dict of names as the function would see it.  This can be useful
    to implement a cache decorator that uses the function arguments to build
    the cache key based on the values of the arguments.

    :param func: the function the arguments should be bound for.
    :param args: tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :return: a :class:`dict` of bound keyword arguments.
    """
    (
        bound_args,
        _bound_kwargs,
        _missing,
        extra,
        extra_positional,
        arg_spec,
        vararg_var,
        kwarg_var,
    ) = _parse_signature(func)(args, kwargs)
    # Pair each declared argument name with the bound positional value.
    values = {spec[0]: value for spec, value in zip(arg_spec, bound_args)}
    if vararg_var is not None:
        values[vararg_var] = tuple(extra_positional)
    elif extra_positional:
        raise TypeError("too many positional arguments")
    if kwarg_var is not None:
        # Keyword arguments that duplicate a declared argument are an error.
        multikw = set(extra) & set(spec[0] for spec in arg_spec)
        if multikw:
            raise TypeError(
                "got multiple values for keyword argument " + repr(next(iter(multikw)))
            )
        values[kwarg_var] = extra
    elif extra:
        raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
    return values
class ArgumentValidationError(ValueError):
    """Raised if :func:`validate_arguments` fails to validate"""

    def __init__(self, missing=None, extra=None, extra_positional=None):
        # Normalise the three collections so attributes are always usable.
        self.missing = set(missing or ())
        self.extra = extra or {}
        self.extra_positional = extra_positional or []
        message = "function arguments invalid. (%d missing, %d additional)" % (
            len(self.missing),
            len(self.extra) + len(self.extra_positional),
        )
        ValueError.__init__(self, message)
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name = None
    #: Wrapped exception.
    exception = None

    def __init__(self, import_name, exception):
        self.import_name = import_name
        self.exception = exception
        msg = (
            "import_string() failed for %r. Possible reasons are:\n\n"
            "- missing __init__.py in a package;\n"
            "- package or module path not included in sys.path;\n"
            "- duplicated package or module name taking precedence in "
            "sys.path;\n"
            "- missing module, class, function or variable;\n\n"
            "Debugged import:\n\n%s\n\n"
            "Original exception:\n\n%s: %s"
        )
        # Re-import each prefix of the dotted path to pinpoint where the
        # import chain first breaks.
        name = ""
        tracked = []
        for part in import_name.replace(":", ".").split("."):
            name = part if not name else name + "." + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, "__file__", None)))
                continue
            track = ["- %r found in %r." % pair for pair in tracked]
            track.append("- %r not found." % name)
            msg = msg % (
                import_name,
                "\n".join(track),
                exception.__class__.__name__,
                str(exception),
            )
            break
        ImportError.__init__(self, msg)

    def __repr__(self):
        return "<%s(%r, %r)>" % (
            self.__class__.__name__,
            self.import_name,
            self.exception,
        )
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/routing.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here a simple example that creates an URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for `GET` and `HEAD` and
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import ast
import difflib
import posixpath
import re
import uuid
import warnings
from pprint import pformat
from threading import Lock
from ._compat import implements_to_string
from ._compat import iteritems
from ._compat import itervalues
from ._compat import native_string_result
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import wsgi_decoding_dance
from ._internal import _encode_idna
from ._internal import _get_environ
from .datastructures import ImmutableDict
from .datastructures import MultiDict
from .exceptions import BadHost
from .exceptions import BadRequest
from .exceptions import HTTPException
from .exceptions import MethodNotAllowed
from .exceptions import NotFound
from .urls import _fast_url_quote
from .urls import url_encode
from .urls import url_join
from .urls import url_quote
from .utils import cached_property
from .utils import format_string
from .utils import redirect
from .wsgi import get_host
_rule_re = re.compile(
r"""
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
""",
re.VERBOSE,
)
_simple_rule_re = re.compile(r"<([^>]+)>")
_converter_args_re = re.compile(
r"""
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
[\w\d_.]+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
""",
re.VERBOSE | re.UNICODE,
)
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
def _pythonize(value):
    """Convert a converter-argument token into the matching Python value.

    Tries the literal constants ``None``/``True``/``False`` first, then
    integers and floats; quoted strings lose one layer of matching quotes.
    Anything else is returned as text.
    """
    if value in _PYTHON_CONSTANTS:
        return _PYTHON_CONSTANTS[value]
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    if value[:1] == value[-1:] and value[0] in "\"'":
        value = value[1:-1]
    return text_type(value)
def parse_converter_args(argstr):
    """Split a converter argument string into ``(args, kwargs)``.

    A trailing comma is appended so the tokenizing regex consumes every
    argument, including the last one.
    """
    argstr += ","
    positional = []
    keyword = {}
    for match in _converter_args_re.finditer(argstr):
        value = match.group("stringval")
        if value is None:
            value = match.group("value")
        value = _pythonize(value)
        name = match.group("name")
        if name:
            keyword[name] = value
        else:
            positional.append(value)
    return tuple(positional), keyword
def parse_rule(rule):
    """Parse a rule and return it as generator.  Each iteration yields tuples
    in the form ``(converter, arguments, variable)``.  If the converter is
    `None` it's a static url part, otherwise it's a dynamic one.

    :internal:
    """
    pos = 0
    end = len(rule)
    used_names = set()
    while pos < end:
        match = _rule_re.match(rule, pos)
        if match is None:
            break
        data = match.groupdict()
        if data["static"]:
            # Static text that precedes the placeholder.
            yield None, None, data["static"]
        variable = data["variable"]
        converter = data["converter"] or "default"
        if variable in used_names:
            raise ValueError("variable name %r used twice." % variable)
        used_names.add(variable)
        yield converter, data["args"] or None, variable
        pos = match.end()
    if pos < end:
        remaining = rule[pos:]
        # Leftover angle brackets mean the placeholder syntax was broken.
        if ">" in remaining or "<" in remaining:
            raise ValueError("malformed url rule: %r" % rule)
        yield None, None, remaining
class RoutingException(Exception):
    """Base class for exceptions that carry routing control flow, e.g. a
    request for a redirect or a notification about a missing URL.

    :internal:
    """
class RequestRedirect(HTTPException, RoutingException):
    """Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` are activated and an url that requires a trailing slash.

    The attribute `new_url` contains the absolute destination url.
    """

    # 308 Permanent Redirect: preserves the request method on redirect.
    code = 308

    def __init__(self, new_url):
        RoutingException.__init__(self, new_url)
        # Absolute URL the client should be redirected to.
        self.new_url = new_url

    def get_response(self, environ=None):
        # Turn the exception into a real response object so it can be
        # returned directly as a WSGI application.
        return redirect(self.new_url, self.code)
class RequestPath(RoutingException):
    """Internal exception."""

    # Only the path is carried; __slots__ keeps instances lightweight.
    __slots__ = ("path_info",)

    def __init__(self, path_info):
        # Replacement path the map should redirect to (without the
        # subdomain part).
        self.path_info = path_info
class RequestAliasRedirect(RoutingException):  # noqa: B903
    """This rule is an alias and wants to redirect to the canonical URL."""

    def __init__(self, matched_values):
        # Converted values matched by the alias rule; the map uses them
        # to build the canonical URL to redirect to.
        self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
    """Raised if the build system cannot find a URL for an endpoint with the
    values provided.
    """

    def __init__(self, endpoint, values, method, adapter=None):
        LookupError.__init__(self, endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
        # Adapter the build was attempted on; used for suggestions.
        self.adapter = adapter

    @cached_property
    def suggested(self):
        # The rule most similar to the failed endpoint, if any.
        return self.closest_rule(self.adapter)

    def closest_rule(self, adapter):
        def _score_rule(rule):
            # Endpoint name similarity dominates (0.98); matching argument
            # sets and methods each contribute a small tie-breaking bonus.
            return sum(
                [
                    0.98
                    * difflib.SequenceMatcher(
                        None, rule.endpoint, self.endpoint
                    ).ratio(),
                    0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
                    0.01 * bool(rule.methods and self.method in rule.methods),
                ]
            )

        if adapter and adapter.map._rules:
            return max(adapter.map._rules, key=_score_rule)

    def __str__(self):
        message = []
        message.append("Could not build url for endpoint %r" % self.endpoint)
        if self.method:
            message.append(" (%r)" % self.method)
        if self.values:
            message.append(" with values %r" % sorted(self.values.keys()))
        message.append(".")
        if self.suggested:
            if self.endpoint == self.suggested.endpoint:
                # Same endpoint exists: hint at wrong method or missing values.
                if self.method and self.method not in self.suggested.methods:
                    message.append(
                        " Did you mean to use methods %r?"
                        % sorted(self.suggested.methods)
                    )
                missing_values = self.suggested.arguments.union(
                    set(self.suggested.defaults or ())
                ) - set(self.values.keys())
                if missing_values:
                    message.append(
                        " Did you forget to specify values %r?" % sorted(missing_values)
                    )
            else:
                # Different endpoint: suggest the closest name.
                message.append(" Did you mean %r instead?" % self.suggested.endpoint)
        return u"".join(message)
class WebsocketMismatch(BadRequest):
    """Raised when the only rule that matched is a WebSocket rule but the
    request is plain HTTP, or the rule is HTTP and the request is a
    WebSocket.
    """
class ValidationError(ValueError):
    """Raised by a rule converter to signal that a value does not match;
    the current rule is skipped and the next URL rule is tried.
    """
class RuleFactory(object):
    """Base class for objects that produce URL rules.  With more complex
    URL setups, rule factories avoid repetitive definitions; several are
    built in, and new ones are created by subclassing and overriding
    `get_rules`.
    """

    def get_rules(self, map):
        """Subclasses must override this and return an iterable of rules."""
        raise NotImplementedError()
class Subdomain(RuleFactory):
    """All URLs provided by this factory have the subdomain set to a
    specific domain.  For example if you want to use the subdomain for
    the current language this can be a good setup::

        url_map = Map([
            Rule('/', endpoint='#select_language'),
            Subdomain('<string(length=2):lang_code>', [
                Rule('/', endpoint='index'),
                Rule('/about', endpoint='about'),
                Rule('/help', endpoint='help')
            ])
        ])

    All the rules except for the ``'#select_language'`` endpoint will now
    listen on a two letter long subdomain that holds the language code
    for the current request.
    """

    def __init__(self, subdomain, rules):
        self.subdomain = subdomain
        self.rules = rules

    def get_rules(self, map):
        for factory in self.rules:
            for rule in factory.get_rules(map):
                # Work on an unbound copy so the wrapped rule stays intact.
                copied = rule.empty()
                copied.subdomain = self.subdomain
                yield copied
class Submount(RuleFactory):
    """Like `Subdomain` but prefixes the URL rule with a given string::

        url_map = Map([
            Rule('/', endpoint='index'),
            Submount('/blog', [
                Rule('/', endpoint='blog/index'),
                Rule('/entry/<entry_slug>', endpoint='blog/show')
            ])
        ])

    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
    """

    def __init__(self, path, rules):
        # Normalise so joining never produces a double slash.
        self.path = path.rstrip("/")
        self.rules = rules

    def get_rules(self, map):
        for factory in self.rules:
            for rule in factory.get_rules(map):
                copied = rule.empty()
                copied.rule = self.path + copied.rule
                yield copied
class EndpointPrefix(RuleFactory):
    """Prefixes all endpoints (which must be strings for this factory) with
    another string.  This can be useful for sub applications::

        url_map = Map([
            Rule('/', endpoint='index'),
            EndpointPrefix('blog/', [Submount('/blog', [
                Rule('/', endpoint='index'),
                Rule('/entry/<entry_slug>', endpoint='show')
            ])])
        ])
    """

    def __init__(self, prefix, rules):
        self.prefix = prefix
        self.rules = rules

    def get_rules(self, map):
        for factory in self.rules:
            for rule in factory.get_rules(map):
                copied = rule.empty()
                copied.endpoint = self.prefix + copied.endpoint
                yield copied
class RuleTemplate(object):
    """Returns copies of the rules wrapped and expands string templates in
    the endpoint, rule, defaults or subdomain sections.

    Here a small example for such a rule template::

        from werkzeug.routing import Map, Rule, RuleTemplate

        resource = RuleTemplate([
            Rule('/$name/', endpoint='$name.list'),
            Rule('/$name/<int:id>', endpoint='$name.show')
        ])

        url_map = Map([resource(name='user'), resource(name='page')])

    When a rule template is called the keyword arguments are used to
    replace the placeholders in all the string parameters.
    """

    def __init__(self, rules):
        # Materialise the iterable so the template can be called repeatedly.
        self.rules = list(rules)

    def __call__(self, *args, **kwargs):
        return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
    """A factory that fills in template variables into rules.  Used by
    `RuleTemplate` internally.

    :internal:
    """

    def __init__(self, rules, context):
        self.rules = rules
        self.context = context

    def get_rules(self, map):
        for factory in self.rules:
            for rule in factory.get_rules(map):
                new_defaults = subdomain = None
                if rule.defaults:
                    # Expand template placeholders in string default values.
                    new_defaults = {}
                    for key, value in iteritems(rule.defaults):
                        if isinstance(value, string_types):
                            value = format_string(value, self.context)
                        new_defaults[key] = value
                if rule.subdomain is not None:
                    subdomain = format_string(rule.subdomain, self.context)
                new_endpoint = rule.endpoint
                if isinstance(new_endpoint, string_types):
                    new_endpoint = format_string(new_endpoint, self.context)
                # Build a fresh Rule with every templated part expanded.
                yield Rule(
                    format_string(rule.rule, self.context),
                    new_defaults,
                    subdomain,
                    rule.methods,
                    rule.build_only,
                    new_endpoint,
                    rule.strict_slashes,
                )
def _prefix_names(src):
"""ast parse and prefix names with `.` to avoid collision with user vars"""
tree = ast.parse(src).body[0]
if isinstance(tree, ast.Expr):
tree = tree.value
for node in ast.walk(tree):
if isinstance(node, ast.Name):
node.id = "." + node.id
return tree
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
_IF_KWARGS_URL_ENCODE_CODE = """\
if kwargs:
q = '?'
params = self._encode_query_vars(kwargs)
else:
q = params = ''
"""
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`merge_slashes`
Override :attr:`Map.merge_slashes` for this rule.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be build. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
`websocket`
If ``True``, this rule is only matches for WebSocket (``ws://``,
``wss://``) requests. By default, rules will only match for HTTP
requests.
.. versionadded:: 1.0
Added ``websocket``.
.. versionadded:: 1.0
Added ``merge_slashes``.
.. versionadded:: 0.7
Added ``alias`` and ``host``.
.. versionchanged:: 0.6.1
``HEAD`` is added to ``methods`` if ``GET`` is present.
"""
def __init__(
    self,
    string,
    defaults=None,
    subdomain=None,
    methods=None,
    build_only=False,
    endpoint=None,
    strict_slashes=None,
    merge_slashes=None,
    redirect_to=None,
    alias=False,
    host=None,
    websocket=False,
):
    """Store the rule configuration; see the class docstring for the
    meaning of each keyword argument.  The rule is inert until bound to
    a map via :meth:`bind`.
    """
    if not string.startswith("/"):
        raise ValueError("urls must start with a leading slash")
    self.rule = string
    # A rule without a trailing slash is a "leaf"; branch rules end in "/".
    self.is_leaf = not string.endswith("/")
    # Set by bind(); None means "not bound to a Map yet".
    self.map = None
    # None for these three means "inherit the Map's setting" (see bind()).
    self.strict_slashes = strict_slashes
    self.merge_slashes = merge_slashes
    self.subdomain = subdomain
    self.host = host
    self.defaults = defaults
    self.build_only = build_only
    self.alias = alias
    self.websocket = websocket
    if methods is not None:
        if isinstance(methods, str):
            raise TypeError("'methods' should be a list of strings.")
        # Methods are matched case-insensitively; store them uppercased.
        methods = {x.upper() for x in methods}
        # GET implies HEAD per HTTP semantics.
        if "HEAD" not in methods and "GET" in methods:
            methods.add("HEAD")
        # WebSocket handshakes are GET requests, so other verbs make no sense.
        if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
            raise ValueError(
                "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
            )
    self.methods = methods
    self.endpoint = endpoint
    self.redirect_to = redirect_to
    # Argument names known so far; compile() adds the placeholder names.
    if defaults:
        self.arguments = set(map(str, defaults))
    else:
        self.arguments = set()
    # Filled in by compile() once the rule is bound.
    self._trace = self._converters = self._regex = self._argument_weights = None
def empty(self):
    """
    Return an unbound copy of this rule.

    This can be useful if want to reuse an already bound URL for another
    map.  See ``get_empty_kwargs`` to override what keyword arguments are
    provided to the new copy.
    """
    # type(self) keeps subclasses intact when copying.
    return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
    """
    Provides kwargs for instantiating empty copy with empty()

    Use this method to provide custom keyword arguments to the subclass of
    ``Rule`` when calling ``some_rule.empty()``.  Helpful when the subclass
    has custom keyword arguments that are needed at instantiation.

    Must return a ``dict`` that will be provided as kwargs to the new
    instance of ``Rule``, following the initial ``self.rule`` value which
    is always provided as the first, required positional argument.
    """
    # Copy the defaults so the new rule cannot mutate ours.
    defaults = dict(self.defaults) if self.defaults else None
    return {
        "defaults": defaults,
        "subdomain": self.subdomain,
        "methods": self.methods,
        "build_only": self.build_only,
        "endpoint": self.endpoint,
        "strict_slashes": self.strict_slashes,
        "redirect_to": self.redirect_to,
        "alias": self.alias,
        "host": self.host,
    }
def get_rules(self, map):
    # A rule is its own factory: it yields just itself.
    yield self
def refresh(self):
    """Rebinds and refreshes the URL.  Call this if you modified the
    rule in place.

    :internal:
    """
    # Re-run bind() against the current map to recompile the regex.
    self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
    """Bind the url to a map and create a regular expression based on
    the information from the rule itself and the defaults from the map.

    :internal:
    """
    if self.map is not None and not rebind:
        raise RuntimeError("url rule %r already bound to map %r" % (self, self.map))
    self.map = map
    # Inherit any settings that were left unspecified from the map.
    for attr, fallback in (
        ("strict_slashes", map.strict_slashes),
        ("merge_slashes", map.merge_slashes),
        ("subdomain", map.default_subdomain),
    ):
        if getattr(self, attr) is None:
            setattr(self, attr, fallback)
    self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
    """Looks up the converter for the given parameter.

    .. versionadded:: 0.9
    """
    try:
        converter_cls = self.map.converters[converter_name]
    except KeyError:
        raise LookupError("the converter %r does not exist" % converter_name)
    return converter_cls(self.map, *args, **kwargs)
def _encode_query_vars(self, query_vars):
    """Serialize query arguments using the map's charset and sort options."""
    url_map = self.map
    return url_encode(
        query_vars,
        charset=url_map.charset,
        sort=url_map.sort_parameters,
        key=url_map.sort_key,
    )
def compile(self):
    """Compiles the regular expression and stores it."""
    assert self.map is not None, "rule not bound"

    # The "domain" part of the combined pattern is either the host rule
    # or the subdomain rule, depending on the map's matching mode.
    if self.map.host_matching:
        domain_rule = self.host or ""
    else:
        domain_rule = self.subdomain or ""

    self._trace = []
    self._converters = {}
    self._static_weights = []
    self._argument_weights = []
    regex_parts = []

    def _build_regex(rule):
        # Appends regex fragments for one rule string and records a trace
        # of (is_dynamic, value) pairs used later by the URL builder.
        index = 0
        for converter, arguments, variable in parse_rule(rule):
            if converter is None:
                # Static part: handle runs of slashes and literal chunks
                # separately so merge_slashes can collapse duplicates.
                for match in re.finditer(r"/+|[^/]+", variable):
                    part = match.group(0)
                    if part.startswith("/"):
                        if self.merge_slashes:
                            regex_parts.append(r"/+?")
                            self._trace.append((False, "/"))
                        else:
                            regex_parts.append(part)
                            self._trace.append((False, part))
                        continue
                    self._trace.append((False, part))
                    regex_parts.append(re.escape(part))
                    if part:
                        self._static_weights.append((index, -len(part)))
            else:
                # Dynamic placeholder: instantiate its converter and embed
                # the converter's regex as a named group.
                if arguments:
                    c_args, c_kwargs = parse_converter_args(arguments)
                else:
                    c_args = ()
                    c_kwargs = {}
                convobj = self.get_converter(variable, converter, c_args, c_kwargs)
                regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex))
                self._converters[variable] = convobj
                self._trace.append((True, variable))
                self._argument_weights.append(convobj.weight)
                self.arguments.add(str(variable))
            index = index + 1

    # Combined pattern is "<domain>|<path>"; the literal "|" separates them.
    _build_regex(domain_rule)
    regex_parts.append("\\|")
    self._trace.append((False, "|"))
    _build_regex(self.rule if self.is_leaf else self.rule.rstrip("/"))
    if not self.is_leaf:
        self._trace.append((False, "/"))

    # Generate the URL builder functions from the recorded trace.
    self._build = self._compile_builder(False).__get__(self, None)
    self._build_unknown = self._compile_builder(True).__get__(self, None)

    if self.build_only:
        return

    # Branch rules (or non-strict leaves) accept an optional trailing
    # slash captured as __suffix__; match() uses it to decide redirects.
    if not (self.is_leaf and self.strict_slashes):
        reps = u"*" if self.merge_slashes else u"?"
        tail = u"(?<!/)(?P<__suffix__>/%s)" % reps
    else:
        tail = u""

    regex = u"^%s%s$" % (u"".join(regex_parts), tail)
    self._regex = re.compile(regex, re.UNICODE)
    def match(self, path, method=None):
        """Check if the rule matches a given path. Path is a string in the
        form ``"subdomain|/path"`` and is assembled by the map. If
        the map is doing host matching the subdomain part will be the host
        instead.
        If the rule matches a dict with the converted values is returned,
        otherwise the return value is `None`.
        :internal:
        """
        if not self.build_only:
            require_redirect = False
            m = self._regex.search(path)
            if m is not None:
                groups = m.groupdict()
                # we have a folder like part of the url without a trailing
                # slash and strict slashes enabled. raise an exception that
                # tells the map to redirect to the same url but with a
                # trailing slash
                if (
                    self.strict_slashes
                    and not self.is_leaf
                    and not groups.pop("__suffix__")
                    and (
                        method is None or self.methods is None or method in self.methods
                    )
                ):
                    path += "/"
                    require_redirect = True
                # if we are not in strict slashes mode we have to remove
                # a __suffix__
                elif not self.strict_slashes:
                    del groups["__suffix__"]
                result = {}
                for name, value in iteritems(groups):
                    try:
                        # A converter may reject the raw string; rejection
                        # means this rule simply does not match.
                        value = self._converters[name].to_python(value)
                    except ValidationError:
                        return
                    result[str(name)] = value
                if self.defaults:
                    result.update(self.defaults)
                if self.merge_slashes:
                    # Re-build the canonical URL; if it contains fewer slashes
                    # the original path had duplicates and we redirect to the
                    # merged form.
                    new_path = "|".join(self.build(result, False))
                    if path.endswith("/") and not new_path.endswith("/"):
                        new_path += "/"
                    if new_path.count("/") < path.count("/"):
                        path = new_path
                        require_redirect = True
                if require_redirect:
                    # Strip the domain part before raising; RequestPath only
                    # carries the path portion.
                    path = path.split("|", 1)[1]
                    raise RequestPath(path)
                if self.alias and self.map.redirect_defaults:
                    raise RequestAliasRedirect(result)
                return result
@staticmethod
def _get_func_code(code, name):
globs, locs = {}, {}
exec(code, globs, locs)
return locs[name]
    def _compile_builder(self, append_unknown=True):
        """Generate and compile a specialized URL builder function for this
        rule using the ``ast`` module.  The compiled function accepts the
        rule's arguments as parameters and returns a ``(domain, path)``
        tuple of strings.

        :internal:
        """
        defaults = self.defaults or {}
        dom_ops = []
        url_ops = []
        opl = dom_ops
        for is_dynamic, data in self._trace:
            if data == "|" and opl is dom_ops:
                # The "|" marker separates the domain trace from the path
                # trace; switch the target op list.
                opl = url_ops
                continue
            # this seems like a silly case to ever come up but:
            # if a default is given for a value that appears in the rule,
            # resolve it to a constant ahead of time
            if is_dynamic and data in defaults:
                data = self._converters[data].to_url(defaults[data])
                opl.append((False, data))
            elif not is_dynamic:
                opl.append(
                    (False, url_quote(to_bytes(data, self.map.charset), safe="/:|+"))
                )
            else:
                opl.append((True, data))
        def _convert(elem):
            # AST snippet that calls the element's converter .to_url at
            # build time.
            ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
            ret.args = [ast.Name(str(elem), ast.Load())]  # str for py2
            return ret
        def _parts(ops):
            # Turn the op list into AST string/call nodes, folding adjacent
            # constant strings into one node.
            parts = [
                _convert(elem) if is_dynamic else ast.Str(s=elem)
                for is_dynamic, elem in ops
            ]
            parts = parts or [ast.Str("")]
            # constant fold
            ret = [parts[0]]
            for p in parts[1:]:
                if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str):
                    ret[-1] = ast.Str(ret[-1].s + p.s)
                else:
                    ret.append(p)
            return ret
        dom_parts = _parts(dom_ops)
        url_parts = _parts(url_ops)
        if not append_unknown:
            body = []
        else:
            # Prepend the "if kwargs: url-encode them" statement and append
            # the pre-built AST names that reference the encoded suffix.
            body = [_IF_KWARGS_URL_ENCODE_AST]
            url_parts.extend(_URL_ENCODE_AST_NAMES)
        def _join(parts):
            if len(parts) == 1:  # shortcut
                return parts[0]
            elif hasattr(ast, "JoinedStr"):  # py36+
                return ast.JoinedStr(parts)
            else:
                call = _prefix_names('"".join()')
                call.args = [ast.Tuple(parts, ast.Load())]
                return call
        body.append(
            ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
        )
        # str is necessary for python2
        pargs = [
            str(elem)
            for is_dynamic, elem in dom_ops + url_ops
            if is_dynamic and elem not in defaults
        ]
        kargs = [str(k) for k in defaults]
        func_ast = _prefix_names("def _(): pass")
        func_ast.name = "<builder:{!r}>".format(self.rule)
        if hasattr(ast, "arg"):  # py3
            func_ast.args.args.append(ast.arg(".self", None))
            for arg in pargs + kargs:
                func_ast.args.args.append(ast.arg(arg, None))
            func_ast.args.kwarg = ast.arg(".kwargs", None)
        else:
            func_ast.args.args.append(ast.Name(".self", ast.Param()))
            for arg in pargs + kargs:
                func_ast.args.args.append(ast.Name(arg, ast.Param()))
            func_ast.args.kwarg = ".kwargs"
        for _ in kargs:
            func_ast.args.defaults.append(ast.Str(""))
        func_ast.body = body
        # use `ast.parse` instead of `ast.Module` for better portability
        # python3.8 changes the signature of `ast.Module`
        module = ast.parse("")
        module.body = [func_ast]
        # mark everything as on line 1, offset 0
        # less error-prone than `ast.fix_missing_locations`
        # bad line numbers cause an assert to fail in debug builds
        for node in ast.walk(module):
            if "lineno" in node._attributes:
                node.lineno = 1
            if "col_offset" in node._attributes:
                node.col_offset = 0
        code = compile(module, "<werkzeug routing>", "exec")
        return self._get_func_code(code, func_ast.name)
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
try:
if append_unknown:
return self._build_unknown(**values)
else:
return self._build(**values)
except ValidationError:
return None
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return (
not self.build_only
and self.defaults
and self.endpoint == rule.endpoint
and self != rule
and self.arguments == rule.arguments
)
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if (
method is not None
and self.methods is not None
and method not in self.methods
):
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. rules with more static parts come first so the second argument
is the negative length of the number of the static weights.
3. we order by static weights, which is a combination of index
and length
4. The more complex rules come first so the next argument is the
negative length of the number of argument weights.
5. lastly we order by the actual argument weights.
:internal:
"""
return (
bool(self.arguments),
-len(self._static_weights),
self._static_weights,
-len(self._argument_weights),
self._argument_weights,
)
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
    def __str__(self):
        # The string form of a rule is the original rule string it was
        # created from.
        return self.rule
    @native_string_result
    def __repr__(self):
        """Show the rule string with dynamic parts in angle brackets, the
        allowed methods (when restricted), and the endpoint."""
        if self.map is None:
            return u"<%s (unbound)>" % self.__class__.__name__
        tmp = []
        for is_dynamic, data in self._trace:
            if is_dynamic:
                tmp.append(u"<%s>" % data)
            else:
                tmp.append(data)
        return u"<%s %s%s -> %s>" % (
            self.__class__.__name__,
            # lstrip(u"|") drops the domain separator; lstrip(u"u") drops the
            # unicode-literal prefix that repr() adds on Python 2.
            repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"),
            self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"",
            self.endpoint,
        )
class BaseConverter(object):
    """Base class for all converters.

    Subclasses set :attr:`regex` to control what a dynamic URL part
    matches and may override :meth:`to_python` / :meth:`to_url` to
    convert between matched strings and Python values.
    """
    #: Pattern a dynamic part using this converter matches.
    regex = "[^/]+"
    #: Sorting weight for match ordering.
    weight = 100
    def __init__(self, map):
        self.map = map
    def to_python(self, value):
        # The matched string is passed through unchanged by default.
        return value
    def to_url(self, value):
        # Byte strings are quoted as-is; anything else is stringified and
        # encoded with the map's charset first.
        if not isinstance(value, (bytes, bytearray)):
            value = text_type(value).encode(self.map.charset)
        return _fast_url_quote(value)
class UnicodeConverter(BaseConverter):
    """This converter is the default converter and accepts any string but
    only one path segment.  Thus the string can not include a slash.

    This is the default validator.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string.  Must be greater
                      or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """
    def __init__(self, map, minlength=1, maxlength=None, length=None):
        BaseConverter.__init__(self, map)
        if length is not None:
            # An exact length takes precedence over min/max length.
            quantifier = "{%d}" % int(length)
        else:
            # An open upper bound is expressed as an empty string in the
            # regex quantifier: "{min,}".
            upper = "" if maxlength is None else int(maxlength)
            quantifier = "{%s,%s}" % (int(minlength), upper)
        self.regex = "[^/]" + quantifier
class AnyConverter(BaseConverter):
    """Matches one of the items provided.  Items can either be Python
    identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: this function accepts the possible items as positional
                  arguments.
    """
    def __init__(self, map, *items):
        BaseConverter.__init__(self, map)
        # Build a non-capturing alternation of the escaped literal choices.
        choices = "|".join(re.escape(item) for item in items)
        self.regex = "(?:%s)" % choices
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but it also matches
    slashes.  This is useful for wikis and similar applications::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """
    # Any string not starting with a slash, matched lazily so that static
    # rule parts after the placeholder can still match.
    regex = "[^/].*?"
    # Higher weight than the default converter so path parts match later.
    weight = 200
class NumberConverter(BaseConverter):
    """Baseclass for `IntegerConverter` and `FloatConverter`.

    :internal:
    """
    weight = 50
    def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
        if signed:
            # Instance-level override: prepend an optional minus sign to
            # the class-level pattern.
            self.regex = self.signed_regex
        BaseConverter.__init__(self, map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed
    def to_python(self, value):
        # Enforce an exact digit count when configured.
        if self.fixed_digits and len(value) != self.fixed_digits:
            raise ValidationError()
        number = self.num_convert(value)
        out_of_range = (self.min is not None and number < self.min) or (
            self.max is not None and number > self.max
        )
        if out_of_range:
            raise ValidationError()
        return number
    def to_url(self, value):
        number = self.num_convert(value)
        if self.fixed_digits:
            # Zero-pad to the configured width.
            number = ("%%0%sd" % self.fixed_digits) % number
        return str(number)
    @property
    def signed_regex(self):
        return r"-?" + self.regex
class IntegerConverter(NumberConverter):
    """This converter only accepts integer values::

        Rule("/page/<int:page>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/page/<int(signed=True):page>")

    :param map: The :class:`Map`.
    :param fixed_digits: The number of fixed digits in the URL. If you
        set this to ``4`` for example, the rule will only match if the
        URL looks like ``/0001/``. The default is variable length.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """
    # One or more digits; conversion and range checks happen in the base.
    regex = r"\d+"
    num_convert = int
class FloatConverter(NumberConverter):
    """This converter only accepts floating point values::

        Rule("/probability/<float:probability>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/offset/<float(signed=True):offset>")

    :param map: The :class:`Map`.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """
    # Requires an explicit decimal point; plain integers do not match.
    regex = r"\d+\.\d+"
    num_convert = float
    def __init__(self, map, min=None, max=None, signed=False):
        # Floats do not support ``fixed_digits``; it stays at the base
        # class default of 0.
        super(FloatConverter, self).__init__(map, min=min, max=max, signed=signed)
class UUIDConverter(BaseConverter):
    """This converter only accepts UUID strings::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """
    regex = (
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
    )
    def to_python(self, value):
        # The regex guarantees the string is a well-formed hex UUID.
        return uuid.UUID(value)
    def to_url(self, value):
        return str(value)
#: the default converter mapping for the map.  Maps the converter names
#: usable in a rule placeholder (``<name:variable>``) to converter classes.
DEFAULT_CONVERTERS = {
    "default": UnicodeConverter,
    "string": UnicodeConverter,
    "any": AnyConverter,
    "path": PathConverter,
    "int": IntegerConverter,
    "float": FloatConverter,
    "uuid": UUIDConverter,
}
class Map(object):
    """The map class stores all the URL rules and some configuration
    parameters.  Some of the configuration values are only stored on the
    `Map` instance since those affect all rules, others are just defaults
    and can be overridden for each rule.  Note that you have to specify all
    arguments besides the `rules` as keyword arguments!
    :param rules: sequence of url rules for this map.
    :param default_subdomain: The default subdomain for rules without a
                              subdomain defined.
    :param charset: charset of the url. defaults to ``"utf-8"``
    :param strict_slashes: If a rule ends with a slash but the matched
        URL does not, redirect to the URL with a trailing slash.
    :param merge_slashes: Merge consecutive slashes when matching or
        building URLs. Matches will redirect to the normalized URL.
        Slashes in variable parts are not merged.
    :param redirect_defaults: This will redirect to the default rule if it
                              wasn't visited that way. This helps creating
                              unique URLs.
    :param converters: A dict of converters that adds additional converters
                       to the list of converters. If you redefine one
                       converter this will override the original one.
    :param sort_parameters: If set to `True` the url parameters are sorted.
                            See `url_encode` for more details.
    :param sort_key: The sort key function for `url_encode`.
    :param encoding_errors: the error method to use for decoding
    :param host_matching: if set to `True` it enables the host matching
                          feature and disables the subdomain one.  If
                          enabled the `host` parameter to rules is used
                          instead of the `subdomain` one.
    .. versionchanged:: 1.0
        If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
        will match.
    .. versionchanged:: 1.0
        Added ``merge_slashes``.
    .. versionchanged:: 0.7
        Added ``encoding_errors`` and ``host_matching``.
    .. versionchanged:: 0.5
        Added ``sort_parameters`` and ``sort_key``.
    """
    #: A dict of default converters to be used.
    default_converters = ImmutableDict(DEFAULT_CONVERTERS)
    #: The type of lock to use when updating.
    #:
    #: .. versionadded:: 1.0
    lock_class = Lock
    def __init__(
        self,
        rules=None,
        default_subdomain="",
        charset="utf-8",
        strict_slashes=True,
        merge_slashes=True,
        redirect_defaults=True,
        converters=None,
        sort_parameters=False,
        sort_key=None,
        encoding_errors="replace",
        host_matching=False,
    ):
        self._rules = []
        self._rules_by_endpoint = {}
        # _remap flags that the rule lists need re-sorting; it is checked
        # and cleared under _remap_lock in update().
        self._remap = True
        self._remap_lock = self.lock_class()
        self.default_subdomain = default_subdomain
        self.charset = charset
        self.encoding_errors = encoding_errors
        self.strict_slashes = strict_slashes
        self.merge_slashes = merge_slashes
        self.redirect_defaults = redirect_defaults
        self.host_matching = host_matching
        # Per-instance copy so user converters never mutate the class default.
        self.converters = self.default_converters.copy()
        if converters:
            self.converters.update(converters)
        self.sort_parameters = sort_parameters
        self.sort_key = sort_key
        for rulefactory in rules or ():
            self.add(rulefactory)
    def is_endpoint_expecting(self, endpoint, *arguments):
        """Iterate over all rules and check if the endpoint expects
        the arguments provided.  This is for example useful if you have
        some URLs that expect a language code and others that do not and
        you want to wrap the builder a bit so that the current language
        code is automatically added if not provided but endpoints expect
        it.
        :param endpoint: the endpoint to check.
        :param arguments: this function accepts one or more arguments
                          as positional arguments.  Each one of them is
                          checked.
        """
        self.update()
        arguments = set(arguments)
        for rule in self._rules_by_endpoint[endpoint]:
            if arguments.issubset(rule.arguments):
                return True
        return False
    def iter_rules(self, endpoint=None):
        """Iterate over all rules or the rules of an endpoint.
        :param endpoint: if provided only the rules for that endpoint
                         are returned.
        :return: an iterator
        """
        self.update()
        if endpoint is not None:
            return iter(self._rules_by_endpoint[endpoint])
        return iter(self._rules)
    def add(self, rulefactory):
        """Add a new rule or factory to the map and bind it.  Requires that the
        rule is not bound to another map.
        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
        """
        for rule in rulefactory.get_rules(self):
            rule.bind(self)
            self._rules.append(rule)
            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
        # Invalidate the sort order; update() will re-sort lazily.
        self._remap = True
    def bind(
        self,
        server_name,
        script_name=None,
        subdomain=None,
        url_scheme="http",
        default_method="GET",
        path_info=None,
        query_args=None,
    ):
        """Return a new :class:`MapAdapter` with the details specified to the
        call.  Note that `script_name` will default to ``'/'`` if not further
        specified or `None`.  The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.
        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind.  While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.
        `subdomain` will default to the `default_subdomain` for this map if
        no defined.  If there is no `default_subdomain` you cannot use the
        subdomain feature.
        .. versionchanged:: 1.0
            If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
            will match.
        .. versionchanged:: 0.15
            ``path_info`` defaults to ``'/'`` if ``None``.
        .. versionchanged:: 0.8
            ``query_args`` can be a string.
        .. versionchanged:: 0.7
            Added ``query_args``.
        """
        server_name = server_name.lower()
        if self.host_matching:
            if subdomain is not None:
                raise RuntimeError("host matching enabled and a subdomain was provided")
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = "/"
        if path_info is None:
            path_info = "/"
        try:
            # Non-ASCII host names must be IDNA-encodable to be valid.
            server_name = _encode_idna(server_name)
        except UnicodeError:
            raise BadHost()
        return MapAdapter(
            self,
            server_name,
            script_name,
            subdomain,
            url_scheme,
            path_info,
            default_method,
            query_args,
        )
    def bind_to_environ(self, environ, server_name=None, subdomain=None):
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary.  Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment.  If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.
        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.
        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead.  This allows you to pass request
        objects.  Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.
        .. versionchanged:: 1.0.0
            If the passed server name specifies port 443, it will match
            if the incoming scheme is ``https`` without a port.
        .. versionchanged:: 1.0.0
            A warning is shown when the passed server name does not
            match the incoming WSGI server name.
        .. versionchanged:: 0.8
           This will no longer raise a ValueError when an unexpected server
           name was passed.
        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect.  It was removed because
            of that.
        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        environ = _get_environ(environ)
        wsgi_server_name = get_host(environ).lower()
        scheme = environ["wsgi.url_scheme"]
        if server_name is None:
            server_name = wsgi_server_name
        else:
            server_name = server_name.lower()
            # strip standard port to match get_host()
            if scheme == "http" and server_name.endswith(":80"):
                server_name = server_name[:-3]
            elif scheme == "https" and server_name.endswith(":443"):
                server_name = server_name[:-4]
        if subdomain is None and not self.host_matching:
            cur_server_name = wsgi_server_name.split(".")
            real_server_name = server_name.split(".")
            offset = -len(real_server_name)
            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                warnings.warn(
                    "Current server name '{}' doesn't match configured"
                    " server name '{}'".format(wsgi_server_name, server_name),
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                subdomain = ".".join(filter(None, cur_server_name[:offset]))
        def _get_wsgi_string(name):
            # Decode a WSGI environ value using the map's charset.
            val = environ.get(name)
            if val is not None:
                return wsgi_decoding_dance(val, self.charset)
        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            environ["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )
    def update(self):
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        if not self._remap:
            return
        with self._remap_lock:
            # Re-check under the lock: another thread may have re-sorted
            # while this one was waiting.
            if not self._remap:
                return
            self._rules.sort(key=lambda x: x.match_compare_key())
            for rules in itervalues(self._rules_by_endpoint):
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False
    def __repr__(self):
        rules = self.iter_rules()
        return "%s(%s)" % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(
self,
map,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args=None,
):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u"/"):
script_name += u"/"
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
self.websocket = self.url_scheme in {"ws", "wss"}
    def dispatch(
        self, view_func, path_info=None, method=None, catch_http_exceptions=False
    ):
        """Does the complete dispatching process.  `view_func` is called with
        the endpoint and a dict with the values for the view.  It should
        look up the view function, call it, and return a response object
        or WSGI application.  http exceptions are not caught by default
        so that applications can display nicer error messages by just
        catching them by hand.  If you want to stick with the default
        error messages you can pass it ``catch_http_exceptions=True`` and
        it will catch the http exceptions.
        Here a small example for the dispatch usage::
            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule
            def on_index(request):
                return Response('Hello from the index')
            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}
            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)
        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.
        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second.  Has
                          to dispatch to the actual view function with this
                          information.  (see above)
        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\\s.
        """
        try:
            try:
                endpoint, args = self.match(path_info, method)
            except RequestRedirect as e:
                # A redirect is itself a valid WSGI response; return it
                # instead of raising.
                return e
            return view_func(endpoint, args)
        except HTTPException as e:
            if catch_http_exceptions:
                return e
            raise
    def match(
        self,
        path_info=None,
        method=None,
        return_rule=False,
        query_args=None,
        websocket=None,
    ):
        """The usage is simple: you just pass the match method the current
        path info as well as the method (which defaults to `GET`).  The
        following things can then happen:
        - you receive a `NotFound` exception that indicates that no URL is
          matching.  A `NotFound` exception is also a WSGI application you
          can call to get a default page not found page (happens to be the
          same object as `werkzeug.exceptions.NotFound`)
        - you receive a `MethodNotAllowed` exception that indicates that there
          is a match for this URL but not for the current request method.
          This is useful for RESTful applications.
        - you receive a `RequestRedirect` exception with a `new_url`
          attribute.  This exception is used to notify you about a request
          Werkzeug requests from your WSGI application.  This is for example the
          case if you request ``/foo`` although the correct URL is ``/foo/``
          You can use the `RequestRedirect` instance as response-like object
          similar to all other subclasses of `HTTPException`.
        - you receive a ``WebsocketMismatch`` exception if the only
          match is a WebSocket rule but the bind is an HTTP request, or
          if the match is an HTTP rule but the bind is a WebSocket
          request.
        - you get a tuple in the form ``(endpoint, arguments)`` if there is
          a match (unless `return_rule` is True, in which case you get a tuple
          in the form ``(rule, arguments)``)
        If the path info is not passed to the match method the default path
        info of the map is used (defaults to the root URL if not defined
        explicitly).
        All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses.  They will all render generic error or
        redirect pages.
        Here is a small example for matching:
        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.match("/", "GET")
        ('index', {})
        >>> urls.match("/downloads/42")
        ('downloads/show', {'id': 42})
        And here is what happens on redirect and missing URLs:
        >>> urls.match("/downloads")
        Traceback (most recent call last):
          ...
        RequestRedirect: http://example.com/downloads/
        >>> urls.match("/missing")
        Traceback (most recent call last):
          ...
        NotFound: 404 Not Found
        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param return_rule: return the rule that matched instead of just the
                            endpoint (defaults to `False`).
        :param query_args: optional query arguments that are used for
                           automatic redirects as string or dictionary.  It's
                           currently not possible to use the query arguments
                           for URL matching.
        :param websocket: Match WebSocket instead of HTTP requests. A
            websocket request has a ``ws`` or ``wss``
            :attr:`url_scheme`. This overrides that detection.
        .. versionadded:: 1.0
            Added ``websocket``.
        .. versionchanged:: 0.8
            ``query_args`` can be a string.
        .. versionadded:: 0.7
            Added ``query_args``.
        .. versionadded:: 0.6
            Added ``return_rule``.
        """
        self.map.update()
        if path_info is None:
            path_info = self.path_info
        else:
            path_info = to_unicode(path_info, self.map.charset)
        if query_args is None:
            query_args = self.query_args
        method = (method or self.default_method).upper()
        if websocket is None:
            websocket = self.websocket
        require_redirect = False
        # Rules match against "domain|/path"; the domain part is the host
        # when host matching is enabled, the subdomain otherwise.
        path = u"%s|%s" % (
            self.map.host_matching and self.server_name or self.subdomain,
            path_info and "/%s" % path_info.lstrip("/"),
        )
        # Collected across rules so the right exception can be raised when
        # nothing matches fully.
        have_match_for = set()
        websocket_mismatch = False
        for rule in self.map._rules:
            try:
                rv = rule.match(path, method)
            except RequestPath as e:
                # The rule asked for a canonicalizing redirect (trailing or
                # merged slashes).
                raise RequestRedirect(
                    self.make_redirect_url(
                        url_quote(e.path_info, self.map.charset, safe="/:|+"),
                        query_args,
                    )
                )
            except RequestAliasRedirect as e:
                raise RequestRedirect(
                    self.make_alias_redirect_url(
                        path, rule.endpoint, e.matched_values, method, query_args
                    )
                )
            if rv is None:
                continue
            if rule.methods is not None and method not in rule.methods:
                # URL matched but not the method: remember the allowed
                # methods for a possible MethodNotAllowed below.
                have_match_for.update(rule.methods)
                continue
            if rule.websocket != websocket:
                websocket_mismatch = True
                continue
            if self.map.redirect_defaults:
                redirect_url = self.get_default_redirect(rule, method, rv, query_args)
                if redirect_url is not None:
                    raise RequestRedirect(redirect_url)
            if rule.redirect_to is not None:
                if isinstance(rule.redirect_to, string_types):
                    def _handle_match(match):
                        # Substitute <placeholders> in the redirect target
                        # with the converted matched values.
                        value = rv[match.group(1)]
                        return rule._converters[match.group(1)].to_url(value)
                    redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
                else:
                    redirect_url = rule.redirect_to(self, **rv)
                raise RequestRedirect(
                    str(
                        url_join(
                            "%s://%s%s%s"
                            % (
                                self.url_scheme or "http",
                                self.subdomain + "." if self.subdomain else "",
                                self.server_name,
                                self.script_name,
                            ),
                            redirect_url,
                        )
                    )
                )
            if require_redirect:
                raise RequestRedirect(
                    self.make_redirect_url(
                        url_quote(path_info, self.map.charset, safe="/:|+"), query_args
                    )
                )
            if return_rule:
                return rule, rv
            else:
                return rule.endpoint, rv
        if have_match_for:
            raise MethodNotAllowed(valid_methods=list(have_match_for))
        if websocket_mismatch:
            raise WebsocketMismatch()
        raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not exist.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, "ascii")
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, "ascii")
return (subdomain + u"." if subdomain else u"") + self.server_name
    def get_default_redirect(self, rule, method, values, query_args):
        """A helper that returns the URL to redirect to if it finds one.
        This is used for default redirecting only.
        Returns ``None`` when no higher-priority rule provides defaults
        for the matched one.
        :internal:
        """
        assert self.map.redirect_defaults
        for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourself
            # has a lower priority for the defaults.  We order the ones
            # with the highest priority up for building.
            if r is rule:
                break
            if r.provides_defaults_for(rule) and r.suitable_for(values, method):
                values.update(r.defaults)
                domain_part, path = r.build(values)
                return self.make_redirect_url(path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ""
if query_args:
suffix = "?" + self.encode_query_args(query_args)
return str(
"%s://%s/%s%s"
% (
self.url_scheme or "http",
self.get_host(domain_part),
posixpath.join(
self.script_name[:-1].lstrip("/"), path_info.lstrip("/")
),
suffix,
)
)
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
    """Internally called to make an alias redirect URL."""
    # Build the canonical URL for the aliased endpoint and append the
    # original query string, if any.
    url = self.build(
        endpoint, values, method, append_unknown=False, force_external=True
    )
    if query_args:
        url = "%s?%s" % (url, self.encode_query_args(query_args))
    assert url != path, "detected invalid alias setting. No canonical URL found"
    return url
def _partial_build(self, endpoint, values, method, append_unknown):
    """Helper for :meth:`build`.  Returns subdomain and path for the
    rule that accepts this endpoint, values and method.

    The returned tuple is ``(domain_part, path, websocket)``.

    :internal:
    """
    # in case the method is none, try with the default method first
    if method is None:
        rv = self._partial_build(
            endpoint, values, self.default_method, append_unknown
        )
        if rv is not None:
            return rv

    # Default method did not match or a specific method is passed.
    # Check all for first match with matching host. If no matching
    # host is found, go with first result.
    first_match = None

    for rule in self.map._rules_by_endpoint.get(endpoint, ()):
        if rule.suitable_for(values, method):
            rv = rule.build(values, append_unknown)

            if rv is not None:
                rv = (rv[0], rv[1], rule.websocket)
                if self.map.host_matching:
                    if rv[0] == self.server_name:
                        return rv
                    elif first_match is None:
                        first_match = rv
                else:
                    return rv

    # May be None when no rule was suitable at all.
    return first_match
def build(
    self,
    endpoint,
    values=None,
    method=None,
    force_external=False,
    append_unknown=True,
):
    """Building URLs works pretty much the other way round. Instead of
    `match` you call `build` and pass it the endpoint and a dict of
    arguments for the placeholders.

    The `build` function also accepts an argument called `force_external`
    which, if you set it to `True` will force external URLs. Per default
    external URLs (include the server name) will only be used if the
    target URL is on a different subdomain.

    >>> m = Map([
    ...     Rule('/', endpoint='index'),
    ...     Rule('/downloads/', endpoint='downloads/index'),
    ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
    ... ])
    >>> urls = m.bind("example.com", "/")
    >>> urls.build("index", {})
    '/'
    >>> urls.build("downloads/show", {'id': 42})
    '/downloads/42'
    >>> urls.build("downloads/show", {'id': 42}, force_external=True)
    'http://example.com/downloads/42'

    Because URLs cannot contain non ASCII data you will always get
    bytestrings back. Non ASCII characters are urlencoded with the
    charset defined on the map instance.

    Additional values are converted to unicode and appended to the URL as
    URL querystring parameters:

    >>> urls.build("index", {'q': 'My Searchstring'})
    '/?q=My+Searchstring'

    When processing those additional values, lists are furthermore
    interpreted as multiple values (as per
    :py:class:`werkzeug.datastructures.MultiDict`):

    >>> urls.build("index", {'q': ['a', 'b', 'c']})
    '/?q=a&q=b&q=c'

    Passing a ``MultiDict`` will also add multiple values:

    >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
    '/?p=z&q=a&q=b'

    If a rule does not exist when building a `BuildError` exception is
    raised.

    The build method accepts an argument called `method` which allows you
    to specify the method you want to have an URL built for if you have
    different methods for the same endpoint specified.

    .. versionadded:: 0.6
       the `append_unknown` parameter was added.

    :param endpoint: the endpoint of the URL to build.
    :param values: the values for the URL to build. Unhandled values are
                   appended to the URL as query parameters.
    :param method: the HTTP method for the rule if there are different
                   URLs for different methods on the same endpoint.
    :param force_external: enforce full canonical external URLs. If the URL
                           scheme is not provided, this will generate
                           a protocol-relative URL.
    :param append_unknown: unknown parameters are appended to the generated
                           URL as query string argument. Disable this
                           if you want the builder to ignore those.
    """
    self.map.update()

    if values:
        if isinstance(values, MultiDict):
            # Normalize the MultiDict into a plain dict, flattening
            # single-item lists and dropping empty lists and Nones.
            temp_values = {}
            # iteritems(dict, values) is like `values.lists()`
            # without the call or `list()` coercion overhead.
            for key, value in iteritems(dict, values):
                if not value:
                    continue
                if len(value) == 1:  # flatten single item lists
                    value = value[0]

                    if value is None:  # drop None
                        continue

                temp_values[key] = value

            values = temp_values
        else:
            # drop None
            values = dict(i for i in iteritems(values) if i[1] is not None)
    else:
        values = {}

    rv = self._partial_build(endpoint, values, method, append_unknown)
    if rv is None:
        raise BuildError(endpoint, values, method, self)
    domain_part, path, websocket = rv

    host = self.get_host(domain_part)

    # Always build WebSocket routes with the scheme (browsers
    # require full URLs). If bound to a WebSocket, ensure that HTTP
    # routes are built with an HTTP scheme.
    url_scheme = self.url_scheme
    secure = url_scheme in {"https", "wss"}

    if websocket:
        force_external = True
        url_scheme = "wss" if secure else "ws"
    elif url_scheme:
        url_scheme = "https" if secure else "http"

    # shortcut this.
    if not force_external and (
        (self.map.host_matching and host == self.server_name)
        or (not self.map.host_matching and domain_part == self.subdomain)
    ):
        return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))

    return str(
        "%s//%s%s/%s"
        % (
            url_scheme + ":" if url_scheme else "",
            host,
            self.script_name[:-1],
            path.lstrip("/"),
        )
    )
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/http.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import base64
import re
import warnings
from datetime import datetime
from datetime import timedelta
from hashlib import md5
from time import gmtime
from time import time
from ._compat import integer_types
from ._compat import iteritems
from ._compat import PY2
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._internal import _cookie_parse_impl
from ._internal import _cookie_quote
from ._internal import _make_cookie_domain
try:
from email.utils import parsedate_tz
except ImportError:
from email.Utils import parsedate_tz
try:
from urllib.request import parse_http_list as _parse_list_header
from urllib.parse import unquote_to_bytes as _unquote
except ImportError:
from urllib2 import parse_http_list as _parse_list_header
from urllib2 import unquote as _unquote
_cookie_charset = "latin1"
_basic_auth_charset = "utf-8"
# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
r"""
( # media-range capturing-parenthesis
[^\s;,]+ # type/subtype
(?:[ \t]*;[ \t]* # ";"
(?: # parameter non-capturing-parenthesis
[^\s;,q][^\s;,]* # token that doesn't start with "q"
| # or
q[^\s;,=][^\s;,]* # token that is more than just "q"
)
)* # zero or more parameters
) # end of media-range
(?:[ \t]*;[ \t]*q= # weight is a "q" parameter
(\d*(?:\.\d+)?) # qvalue capturing-parentheses
[^,]* # "extension" accept params: who cares?
)? # accept params are optional
""",
re.VERBOSE,
)
_token_chars = frozenset(
"!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
)
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:"/[]?={} \t')
_option_header_piece_re = re.compile(
r"""
;\s*,?\s* # newlines were replaced with commas
(?P<key>
"[^"\\]*(?:\\.[^"\\]*)*" # quoted string
|
[^\s;,=*]+ # token
)
(?:\*(?P<count>\d+))? # *1, optional continuation index
\s*
(?: # optionally followed by =value
(?: # equals sign, possibly with encoding
\*\s*=\s* # * indicates extended notation
(?: # optional encoding
(?P<encoding>[^\s]+?)
'(?P<language>[^\s]*?)'
)?
|
=\s* # basic notation
)
(?P<value>
"[^"\\]*(?:\\.[^"\\]*)*" # quoted string
|
[^;,]+ # token
)?
)?
\s*
""",
flags=re.VERBOSE,
)
_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?")
_entity_headers = frozenset(
[
"allow",
"content-encoding",
"content-language",
"content-length",
"content-location",
"content-md5",
"content-range",
"content-type",
"expires",
"last-modified",
]
)
_hop_by_hop_headers = frozenset(
[
"connection",
"keep-alive",
"proxy-authenticate",
"proxy-authorization",
"te",
"trailer",
"transfer-encoding",
"upgrade",
]
)
# Mapping of HTTP status code -> canonical reason phrase.  Codes whose
# origin is a specific RFC or a proprietary extension are annotated.
HTTP_STATUS_CODES = {
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    103: "Early Hints",  # see RFC 8297
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi Status",
    208: "Already Reported",  # see RFC 5842
    226: "IM Used",  # see RFC 3229
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    306: "Switch Proxy",  # unused
    307: "Temporary Redirect",
    308: "Permanent Redirect",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",  # unused
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Request Entity Too Large",
    414: "Request URI Too Long",
    415: "Unsupported Media Type",
    416: "Requested Range Not Satisfiable",
    417: "Expectation Failed",
    418: "I'm a teapot",  # see RFC 2324
    421: "Misdirected Request",  # see RFC 7540
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    425: "Too Early",  # see RFC 8470
    426: "Upgrade Required",
    428: "Precondition Required",  # see RFC 6585
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    449: "Retry With",  # proprietary MS extension
    451: "Unavailable For Legal Reasons",
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    506: "Variant Also Negotiates",  # see RFC 2295
    507: "Insufficient Storage",
    508: "Loop Detected",  # see RFC 5842
    510: "Not Extended",
    511: "Network Authentication Failed",  # see RFC 6585
}
def wsgi_to_bytes(data):
    """Coerce a WSGI "unicode represented bytes" string to real bytes.

    Values that are already :class:`bytes` are returned unchanged; text
    is encoded with latin1 as WSGI prescribes for header data.
    """
    # XXX: utf8 fallback?
    return data if isinstance(data, bytes) else data.encode("latin1")
def bytes_to_wsgi(data):
    """Decode raw bytes into the latin1 "WSGI string" representation."""
    assert isinstance(data, bytes), "data must be bytes"
    if isinstance(data, str):
        # Python 2: bytes is str, so the value is already a WSGI string.
        return data
    return data.decode("latin1")
def quote_header_value(value, extra_chars="", allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    # Plain tokens may be emitted without quotes.
    if allow_token and set(value).issubset(_token_chars | set(extra_chars)):
        return value
    escaped = value.replace("\\", "\\\\").replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).

    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    if not value or value[:1] != '"' or value[-1:] != '"':
        return value
    # Strip the surrounding quotes.  Real RFC unquoting would break
    # Internet Explorer, which uploads files with "C:\foo\bar.txt" as
    # the filename, so mimic browser behaviour instead.
    inner = value[1:-1]
    # Leave UNC filenames (\\server\share) untouched: collapsing the
    # escapes would eat the leading double backslash.  See #458.
    if is_filename and inner[:2] == "\\\\":
        return inner
    return inner.replace("\\\\", "\\").replace('\\"', '"')
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = [] if header is None else [header]
    for key, value in iteritems(options):
        # A ``None`` value means the option is a bare flag.
        segments.append(
            key if value is None else "%s=%s" % (key, quote_header_value(value))
        )
    return "; ".join(segments)
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        items = [
            key
            if value is None
            else "%s=%s" % (key, quote_header_value(value, allow_token=allow_token))
            for key, value in iteritems(iterable)
        ]
    else:
        items = [quote_header_value(v, allow_token=allow_token) for v in iterable]
    return ", ".join(items)
def dump_csp_header(header):
    """Dump a Content Security Policy header.

    These are structured into policies such as "default-src 'self';
    script-src 'self'".

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.
    """
    policies = ["%s %s" % directive for directive in iteritems(header)]
    return "; ".join(policies)
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for piece in _parse_list_header(value):
        # Drop surrounding quotes and undo browser-style escaping.
        if piece[:1] == piece[-1:] == '"':
            piece = unquote_header_value(piece[1:-1])
        items.append(piece)
    return items
def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` argument):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        # XXX: validate
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        name, sep, val = item.partition("=")
        if not sep:
            # Bare token: a key without a value.
            result[name] = None
            continue
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
def parse_options_header(value, multiple=False):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionchanged:: 0.15
        :rfc:`2231` parameter continuations are handled.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :param multiple: Whether try to parse and return multiple MIME types
    :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
             if multiple=True
    """
    if not value:
        return "", {}

    result = []

    value = "," + value.replace("\n", ",")
    while value:
        match = _option_header_start_mime_type.match(value)
        if not match:
            break
        result.append(match.group(1))  # mimetype
        options = {}
        # Parse options
        rest = match.group(2)
        continued_encoding = None
        while rest:
            optmatch = _option_header_piece_re.match(rest)
            if not optmatch:
                break
            option, count, encoding, language, option_value = optmatch.groups()
            # Continuations don't have to supply the encoding after the
            # first line. If we're in a continuation, track the current
            # encoding to use for subsequent lines. Reset it when the
            # continuation ends.
            if not count:
                continued_encoding = None
            else:
                if not encoding:
                    encoding = continued_encoding
                continued_encoding = encoding
            option = unquote_header_value(option)
            if option_value is not None:
                # ``filename`` values get the special UNC-preserving
                # unquoting behaviour.
                option_value = unquote_header_value(option_value, option == "filename")

                if encoding is not None:
                    # RFC 2231 extended value: percent-decoded bytes in
                    # the declared charset.
                    option_value = _unquote(option_value).decode(encoding)

            if count:
                # Continuations append to the existing value. For
                # simplicity, this ignores the possibility of
                # out-of-order indices, which shouldn't happen anyway.
                options[option] = options.get(option, "") + option_value
            else:
                options[option] = option_value
            rest = rest[optmatch.end() :]
        result.append(options)
        if multiple is False:
            return tuple(result)
        value = rest

    return tuple(result) if result else ("", {})
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of
    ``(value, quality)`` tuples sorted by the quality with some additional
    accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept

    if not value:
        return cls(None)

    parsed = []
    for match in _accept_re.finditer(value):
        q = match.group(2)
        # Missing quality defaults to 1; explicit values are clamped
        # into the [0, 1] range.
        quality = max(min(float(q), 1), 0) if q else 1
        parsed.append((match.group(1), quality))
    return cls(parsed)
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differs between response and
    request cache control, this method does not.  It's your responsibility
    to not use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    cls = RequestCacheControl if cls is None else cls
    parsed = parse_dict_header(value) if value else None
    return cls(parsed, on_update)
def parse_csp_header(value, on_update=None, cls=None):
    """Parse a Content Security Policy header.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    :param value: a csp header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.ContentSecurityPolicy` is
                used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = ContentSecurityPolicy
    if value is None:
        return cls(None, on_update)

    items = []
    for policy in value.split(";"):
        policy = policy.strip()
        if " " not in policy:
            # Ignore badly formatted policies (no space)
            continue
        directive, directive_value = policy.split(" ", 1)
        items.append((directive.strip(), directive_value.strip()))
    return cls(items, on_update)
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    items = parse_list_header(value) if value else None
    return HeaderSet(items, on_update)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return
    value = wsgi_to_bytes(value)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        # Header did not contain a "<scheme> <credentials>" pair.
        return
    if auth_type == b"basic":
        try:
            username, password = base64.b64decode(auth_info).split(b":", 1)
        except Exception:
            # Invalid base64 or missing ":" separator.
            return
        return Authorization(
            "basic",
            {
                "username": to_unicode(username, _basic_auth_charset),
                "password": to_unicode(password, _basic_auth_charset),
            },
        )
    elif auth_type == b"digest":
        auth_map = parse_dict_header(auth_info)
        # These fields are mandatory for a digest challenge response.
        for key in "username", "realm", "nonce", "uri", "response":
            if key not in auth_map:
                return
        if "qop" in auth_map:
            # With qop, the nonce count and client nonce are required too.
            if not auth_map.get("nc") or not auth_map.get("cnonce"):
                return
        return Authorization("digest", auth_map)
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
    except (ValueError, AttributeError):
        # Scheme without parameters (or a non-string value).
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(auth_type.lower(), parse_dict_header(auth_info), on_update)
def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # Not a date, so treat the value as an etag; weakness is dropped.
    etag, _ = unquote_etag(value)
    return IfRange(etag)
def parse_range_header(value, make_inclusive=True):
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.

    .. versionadded:: 0.7
    """
    if not value or "=" not in value:
        return None

    ranges = []
    # Tracks the end of the previous range so overlapping or
    # out-of-order ranges can be rejected; -1 marks a suffix range.
    last_end = 0
    units, rng = value.split("=", 1)
    units = units.strip().lower()

    for item in rng.split(","):
        item = item.strip()
        if "-" not in item:
            return None
        if item.startswith("-"):
            # Suffix range (e.g. "-500"): only valid once at the end.
            if last_end < 0:
                return None
            try:
                begin = int(item)
            except ValueError:
                return None
            end = None
            last_end = -1
        elif "-" in item:
            begin, end = item.split("-", 1)
            begin = begin.strip()
            end = end.strip()
            if not begin.isdigit():
                return None
            begin = int(begin)
            if begin < last_end or last_end < 0:
                return None
            if end:
                if not end.isdigit():
                    return None
                # Stop index is exclusive in the returned tuples.
                end = int(end) + 1
                if begin >= end:
                    return None
            else:
                # Open-ended range such as "500-".
                end = None
            last_end = end
        ranges.append((begin, end))

    return Range(units, ranges)
def parse_content_range_header(value, on_update=None):
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        units, rangedef = (value or "").strip().split(None, 1)
    except ValueError:
        return None

    if "/" not in rangedef:
        return None
    rng, length = rangedef.split("/", 1)
    # "*" means the total length is unknown.
    if length == "*":
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None

    if rng == "*":
        # Unsatisfied range: only the total length is known.
        return ContentRange(units, None, None, length, on_update=on_update)
    elif "-" not in rng:
        return None

    start, stop = rng.split("-", 1)
    try:
        start = int(start)
        # Stop is exclusive internally, the header value is inclusive.
        stop = int(stop) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    if '"' in etag:
        raise ValueError("invalid etag")
    quoted = '"%s"' % etag
    return "W/" + quoted if weak else quoted
def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('W/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    etag = etag.strip()
    weak = etag[:2] in ("W/", "w/")
    if weak:
        etag = etag[2:]
    # Drop surrounding quotes, if present.
    if etag[:1] == '"' == etag[-1:]:
        etag = etag[1:-1]
    return etag, weak
def parse_etags(value):
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    # Scan the header tag by tag; stop at the first unparsable piece.
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            break
        is_weak, quoted, raw = match.groups()
        if raw == "*":
            # "*" matches any representation; short-circuit.
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)
def generate_etag(data):
    """Generate an etag for some data.

    The tag is the hexadecimal MD5 digest of the raw bytes.
    """
    digest = md5(data)
    return digest.hexdigest()
def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if not value:
        return None
    t = parsedate_tz(value.strip())
    if t is None:
        return None
    # parsedate_tz cannot tell two digit years from zero padded four
    # digit ones, so assume 69-99 mean 19xx and everything below 2000.
    year = t[0]
    if 0 <= year <= 68:
        year += 2000
    elif 69 <= year <= 99:
        year += 1900
    try:
        # Normalize to UTC by subtracting the parsed timezone offset.
        return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0)
    except (ValueError, OverflowError):
        return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return "%s, %02d%s%s%s%04d %02d:%02d:%02d GMT" % (
("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
d.tm_mday,
delim,
(
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
)[d.tm_mon - 1],
delim,
d.tm_year,
d.tm_hour,
d.tm_min,
d.tm_sec,
)
def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.

    :param expires: If provided that date is used, otherwise the current.
    """
    # Same as http_date() but with "-" between the date components.
    return _dump_date(expires, "-")
def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.

    :param timestamp: If provided that date is used, otherwise the current.
    """
    return _dump_date(timestamp, " ")
def parse_age(value=None):
    """Parses a base-10 integer count of seconds into a timedelta.

    If parsing fails, the return value is `None`.

    :param value: a string consisting of an integer represented in base-10
    :return: a :class:`datetime.timedelta` object or `None`.
    """
    if not value:
        return None
    try:
        seconds = int(value)
    except ValueError:
        return None
    if seconds >= 0:
        try:
            return timedelta(seconds=seconds)
        except OverflowError:
            # Value too large to represent as a timedelta.
            pass
    return None
def dump_age(age=None):
    """Formats the duration as a base-10 integer.

    :param age: should be an integer number of seconds,
                a :class:`datetime.timedelta` object, or,
                if the age is unknown, `None` (default).
    """
    if age is None:
        return None
    if isinstance(age, timedelta):
        # Whole seconds only — the equivalent of total_seconds()
        # truncated, kept for compatibility with Python 2.6.
        age = age.days * 24 * 3600 + age.seconds
    seconds = int(age)
    if seconds < 0:
        raise ValueError("age cannot be negative")
    return str(seconds)
def is_resource_modified(
    environ, etag=None, data=None, last_modified=None, ignore_if_range=True
):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, `If-Range` header will be taken into
                            account.
    :return: `True` if the resource was modified, otherwise `False`.

    .. versionchanged:: 1.0.0
        The check is run for methods other than ``GET`` and ``HEAD``.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives. See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    if_range = None
    if not ignore_if_range and "HTTP_RANGE" in environ:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
        if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))

    # A date in If-Range takes precedence over If-Modified-Since.
    if if_range is not None and if_range.date is not None:
        modified_since = if_range.date
    else:
        modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True

    if etag:
        etag, _ = unquote_etag(etag)

        if if_range is not None and if_range.etag is not None:
            # An etag in If-Range overrides the If-None-Match/If-Match
            # handling below.
            unmodified = parse_etags(if_range.etag).contains(etag)
        else:
            if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))
            if if_none_match:
                # https://tools.ietf.org/html/rfc7232#section-3.2
                # "A recipient MUST use the weak comparison function when comparing
                # entity-tags for If-None-Match"
                unmodified = if_none_match.contains_weak(etag)

            # https://tools.ietf.org/html/rfc7232#section-3.1
            # "Origin server MUST use the strong comparison function when
            # comparing entity-tags for If-Match"
            if_match = parse_etags(environ.get("HTTP_IF_MATCH"))
            if if_match:
                unmodified = not if_match.is_strong(etag)

    return not unmodified
def remove_entity_headers(headers, allowed=("expires", "content-location")):
    """Strip entity headers from a list or :class:`Headers` object.

    The operation works in-place. ``Expires`` and ``Content-Location``
    are kept by default because :rfc:`2616` section 10.3.5 specifies
    some entity headers that should still be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    keep = {name.lower() for name in allowed}
    remaining = []
    for name, value in headers:
        # Keep anything that is not an entity header, plus the whitelist.
        if not is_entity_header(name) or name.lower() in keep:
            remaining.append((name, value))
    headers[:] = remaining
def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object. The operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    headers[:] = [item for item in headers if not is_hop_by_hop_header(item[0])]
def is_entity_header(header):
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    # Case-insensitive membership test against the module-level set of
    # entity header names (stored lowercase).
    return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
    """
    # Case-insensitive membership test against the module-level set of
    # hop-by-hop header names (stored lowercase).
    return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset="utf-8", errors="replace", cls=None):
    """Parse a cookie from a string or WSGI environ.

    The same key can be provided multiple times; the values are stored
    in-order. The default :class:`MultiDict` will have the first value
    first, and all values can be retrieved with
    :meth:`MultiDict.getlist`.

    :param header: The cookie header as a string, or a WSGI environ dict
        with a ``HTTP_COOKIE`` key.
    :param charset: The charset for the cookie values.
    :param errors: The error behavior for the charset decoding.
    :param cls: A dict-like class to store the parsed cookies in.
        Defaults to :class:`MultiDict`.

    .. versionchanged:: 1.0.0
        Returns a :class:`MultiDict` instead of a
        ``TypeConversionDict``.

    .. versionchanged:: 0.5
        Returns a :class:`TypeConversionDict` instead of a regular dict.
        The ``cls`` parameter was added.
    """
    if isinstance(header, dict):
        header = header.get("HTTP_COOKIE", "")
    elif header is None:
        header = ""

    # PEP 3333 transports headers as latin1-decoded strings on Python 3;
    # the low-level parser works on bytes, so encode back first.
    if isinstance(header, text_type):
        header = header.encode("latin1", "replace")

    if cls is None:
        cls = MultiDict

    def _iter_pairs():
        for raw_key, raw_val in _cookie_parse_impl(header):
            name = to_unicode(raw_key, charset, errors, allow_none_charset=True)
            if not name:
                # Skip pairs whose key decoded to nothing.
                continue
            value = to_unicode(raw_val, charset, errors, allow_none_charset=True)
            yield try_coerce_native(name), value

    return cls(_iter_pairs())
def dump_cookie(
    key,
    value="",
    max_age=None,
    expires=None,
    path="/",
    domain=None,
    secure=False,
    httponly=False,
    charset="utf-8",
    sync_expires=True,
    max_size=4093,
    samesite=None,
):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string. In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee. If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters. This is technically against the specification but
    happens in the wild. It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session. Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie. This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
        default, 4093, should be safely `supported by most browsers
        <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will
        only be attached to requests if those requests are same-site.

    .. _`cookie`: http://browsercookielimits.squawky.net/

    .. versionchanged:: 1.0.0
        The string ``'None'`` is accepted for ``samesite``.
    """
    # Work entirely in bytes; the result is re-decoded at the end.
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)
    if path is not None:
        from .urls import iri_to_uri

        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        # NOTE: microseconds are intentionally dropped, matching the
        # whole-second resolution of the Max-Age attribute.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for clients that only honor Expires.
        expires = to_bytes(cookie_date(time() + max_age))
    if samesite is not None:
        samesite = samesite.title()
        if samesite not in {"Strict", "Lax", "None"}:
            raise ValueError("SameSite must be 'Strict', 'Lax', or 'None'.")
    buf = [key + b"=" + _cookie_quote(value)]
    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    # The third element says how to render the attribute: True -> quoted
    # key=value, False -> raw key=value, None -> boolean flag (key only).
    for k, v, q in (
        (b"Domain", domain, True),
        (b"Expires", expires, False),
        (b"Max-Age", max_age, False),
        (b"Secure", secure, None),
        (b"HttpOnly", httponly, None),
        (b"Path", path, False),
        (b"SameSite", samesite, False),
    ):
        if q is None:
            if v:
                buf.append(k)
            continue
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b"=" + v
        buf.append(bytes(tmp))
    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b"; ".join(buf)
    if not PY2:
        rv = rv.decode("latin1")
    # Warn if the final value of the cookie is larger than the limit. If the
    # cookie is too large, then it may be silently ignored by the browser,
    # which can be quite hard to debug.
    cookie_size = len(rv)
    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            'The "{key}" cookie is too large: the value was {value_size} bytes'
            " but the header required {extra_size} extra bytes. The final size"
            " was {cookie_size} bytes but the limit is {max_size} bytes."
            " Browsers may silently ignore cookies larger than this.".format(
                key=key,
                value_size=value_size,
                extra_size=cookie_size - value_size,
                cookie_size=cookie_size,
                max_size=max_size,
            ),
            stacklevel=2,
        )
    return rv
def is_byte_range_valid(start, stop, length):
    """Checks if a given byte content range is valid for the given length.

    .. versionadded:: 0.7
    """
    # Either both endpoints are given or neither is.
    if (start is None) != (stop is None):
        return False
    if start is None:
        # No explicit range: only the resource length needs to be sane.
        return length is None or length >= 0
    if start >= stop:
        return False
    if length is None:
        return 0 <= start < stop
    return 0 <= start < length
# circular dependencies
from .datastructures import Accept
from .datastructures import Authorization
from .datastructures import ContentRange
from .datastructures import ContentSecurityPolicy
from .datastructures import ETags
from .datastructures import HeaderSet
from .datastructures import IfRange
from .datastructures import MultiDict
from .datastructures import Range
from .datastructures import RequestCacheControl
from .datastructures import WWWAuthenticate
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/useragents.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import re
class UserAgentParser(object):
    """A simple user agent parser.  Used by the `UserAgent` wrapper."""

    # (pattern, canonical name) pairs, checked in order; first match wins.
    platforms = (
        (" cros ", "chromeos"),
        ("iphone|ios", "iphone"),
        ("ipad", "ipad"),
        (r"darwin|mac|os\s*x", "macos"),
        ("win", "windows"),
        (r"android", "android"),
        ("netbsd", "netbsd"),
        ("openbsd", "openbsd"),
        ("freebsd", "freebsd"),
        ("dragonfly", "dragonflybsd"),
        ("(sun|i86)os", "solaris"),
        (r"x11|lin(\b|ux)?", "linux"),
        (r"nintendo\s+wii", "wii"),
        ("irix", "irix"),
        ("hp-?ux", "hpux"),
        ("aix", "aix"),
        ("sco|unix_sv", "sco"),
        ("bsd", "bsd"),
        ("amiga", "amiga"),
        ("blackberry|playbook", "blackberry"),
        ("symbian", "symbian"),
    )
    browsers = (
        ("googlebot", "google"),
        ("msnbot", "msn"),
        ("yahoo", "yahoo"),
        ("ask jeeves", "ask"),
        (r"aol|america\s+online\s+browser", "aol"),
        (r"opera|opr", "opera"),
        ("edge", "edge"),
        ("chrome|crios", "chrome"),
        ("seamonkey", "seamonkey"),
        ("firefox|firebird|phoenix|iceweasel", "firefox"),
        ("galeon", "galeon"),
        ("safari|version", "safari"),
        ("webkit", "webkit"),
        ("camino", "camino"),
        ("konqueror", "konqueror"),
        ("k-meleon", "kmeleon"),
        ("netscape", "netscape"),
        (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
        ("lynx", "lynx"),
        ("links", "links"),
        ("Baiduspider", "baidu"),
        ("bingbot", "bing"),
        ("mozilla", "mozilla"),
    )

    # Template that appends an optional version capture to a browser pattern.
    _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
    _language_re = re.compile(
        r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
        r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
    )

    def __init__(self):
        # Compile each pattern once, flipping the pairs to (name, regex).
        self.platforms = [
            (name, re.compile(pattern, re.I)) for pattern, name in self.platforms
        ]
        compiled_browsers = []
        for pattern, name in self.browsers:
            version_pattern = self._browser_version_re % pattern
            compiled_browsers.append((name, re.compile(version_pattern, re.I)))
        self.browsers = compiled_browsers

    def __call__(self, user_agent):
        """Return ``(platform, browser, version, language)`` for the given
        user agent string; each element is ``None`` when not recognized."""
        platform = None
        for name, regex in self.platforms:
            if regex.search(user_agent) is not None:
                platform = name
                break

        browser = version = None
        for name, regex in self.browsers:
            found = regex.search(user_agent)
            if found is not None:
                browser = name
                version = found.group(1)
                break

        language = None
        lang_match = self._language_re.search(user_agent)
        if lang_match is not None:
            language = lang_match.group(1) or lang_match.group(2)

        return platform, browser, version, language
class UserAgent(object):
    """Represents a user agent. Pass it a WSGI environment or a user agent
    string and you can inspect some of the details from the user agent
    string via the attributes. The following attributes exist:

    .. attribute:: string

       the raw user agent string

    .. attribute:: platform

       the browser platform. ``None`` if not recognized.
       The following platforms are currently recognized:

       -   `aix`
       -   `amiga`
       -   `android`
       -   `blackberry`
       -   `bsd`
       -   `chromeos`
       -   `dragonflybsd`
       -   `freebsd`
       -   `hpux`
       -   `ipad`
       -   `iphone`
       -   `irix`
       -   `linux`
       -   `macos`
       -   `netbsd`
       -   `openbsd`
       -   `sco`
       -   `solaris`
       -   `symbian`
       -   `wii`
       -   `windows`

    .. attribute:: browser

       the name of the browser. ``None`` if not recognized.
       The following browsers are currently recognized:

       -   `aol` *
       -   `ask` *
       -   `baidu` *
       -   `bing` *
       -   `camino`
       -   `chrome`
       -   `edge`
       -   `firefox`
       -   `galeon`
       -   `google` *
       -   `kmeleon`
       -   `konqueror`
       -   `links`
       -   `lynx`
       -   `mozilla`
       -   `msie`
       -   `msn`
       -   `netscape`
       -   `opera`
       -   `safari`
       -   `seamonkey`
       -   `webkit`
       -   `yahoo` *

       (Browsers marked with a star (``*``) are crawlers.)

    .. attribute:: version

       the version of the browser. ``None`` if not recognized.

    .. attribute:: language

       the language of the browser. ``None`` if not recognized.
    """

    # Shared parser instance; compiled once per process.
    _parser = UserAgentParser()

    def __init__(self, environ_or_string):
        # Accept a WSGI environ dict as well as a raw user agent string.
        if isinstance(environ_or_string, dict):
            environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
        self.string = environ_or_string
        self.platform, self.browser, self.version, self.language = self._parser(
            environ_or_string
        )

    def to_header(self):
        # The header value is simply the raw string.
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        # Truthy only when a browser was recognized.
        return bool(self.browser)

    __bool__ = __nonzero__  # Python 3 spelling of __nonzero__

    def __repr__(self):
        return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/exceptions.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
        except NotFound as e:
            return not_found(request)
        except HTTPException as e:
            return e
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import sys
from datetime import datetime
from ._compat import implements_to_string
from ._compat import integer_types
from ._compat import iteritems
from ._compat import text_type
from ._internal import _get_environ
from .utils import escape
@implements_to_string
class HTTPException(Exception):
    """Baseclass for all HTTP exceptions. This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    #: HTTP status code; ``None`` on this base class, set by subclasses.
    code = None
    #: Default body text; subclasses override it, instances may too.
    description = None

    def __init__(self, description=None, response=None):
        super(HTTPException, self).__init__()
        if description is not None:
            self.description = description
        # Optional pre-built response object returned as-is by get_response().
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """Create an exception that is a subclass of the calling HTTP
        exception and the ``exception`` argument.

        The first argument to the class will be passed to the
        wrapped ``exception``, the rest to the HTTP exception. If
        ``e.args`` is not empty and ``e.show_exception`` is ``True``,
        the wrapped exception message is added to the HTTP error
        description.

        .. versionchanged:: 0.15.5
           The ``show_exception`` attribute controls whether the
           description includes the wrapped exception message.

        .. versionchanged:: 0.15.0
           The description includes the wrapped exception message.
        """

        class newcls(cls, exception):
            # Shadow the class-level description with a property so the
            # wrapped exception's message can be appended on demand.
            _description = cls.description
            show_exception = False

            def __init__(self, arg=None, *args, **kwargs):
                # Initialize the HTTP-exception side, then the wrapped
                # exception with the (optional) first positional argument.
                super(cls, self).__init__(*args, **kwargs)
                if arg is None:
                    exception.__init__(self)
                else:
                    exception.__init__(self, arg)

            @property
            def description(self):
                if self.show_exception:
                    return "{}\n{}: {}".format(
                        self._description, exception.__name__, exception.__str__(self)
                    )
                return self._description

            @description.setter
            def description(self, value):
                self._description = value

        # Pretend the generated class was defined in the caller's module.
        newcls.__module__ = sys._getframe(1).f_globals.get("__name__")
        name = name or cls.__name__ + exception.__name__
        newcls.__name__ = newcls.__qualname__ = name
        return newcls

    @property
    def name(self):
        """The status name."""
        from .http import HTTP_STATUS_CODES

        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")

    def get_description(self, environ=None):
        """Get the description."""
        return u"<p>%s</p>" % escape(self.description).replace("\n", "<br>")

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type(
            (
                u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
                u"<title>%(code)s %(name)s</title>\n"
                u"<h1>%(name)s</h1>\n"
                u"%(description)s\n"
            )
            % {
                "code": self.code,
                "name": escape(self.name),
                "description": self.get_description(environ),
            }
        )

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [("Content-Type", "text/html; charset=utf-8")]

    def get_response(self, environ=None):
        """Get a response object.  If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request.  This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        from .wrappers.response import Response

        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        code = self.code if self.code is not None else "???"
        return "%s %s: %s" % (code, self.name, self.description)

    def __repr__(self):
        code = self.code if self.code is not None else "???"
        return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name)
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """

    #: HTTP status code for this error class.
    code = 400
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """

    # Inherits code 400 and description from BadRequest; no extra behavior.
class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """

    # Inherits code 400 and description from BadRequest.
class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """

    # Inherits code 400 and description from BadRequest.
class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raise if the user is not authorized to access a resource.

    The ``www_authenticate`` argument should be used to set the
    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
    to create correctly formatted values. Strictly speaking a 401
    response is invalid if it doesn't provide at least one value for
    this header, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www-authenticate: A single value, or list of values, for the
        WWW-Authenticate header.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
        its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """

    code = 401
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )

    def __init__(self, description=None, response=None, www_authenticate=None):
        HTTPException.__init__(self, description, response)
        # Normalize a single challenge to a tuple so get_headers can join.
        if www_authenticate is not None:
            if not isinstance(www_authenticate, (tuple, list)):
                www_authenticate = (www_authenticate,)
        self.www_authenticate = www_authenticate

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        # Only emit the header when at least one challenge was provided.
        if self.www_authenticate:
            headers.append(
                ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate]))
            )
        return headers
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """

    #: HTTP status code for this error class.
    code = 403
    description = (
        "You don't have the permission to access the requested"
        " resource. It is either read-protected or not readable by the"
        " server."
    )
class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """

    #: HTTP status code for this error class.
    code = 404
    description = (
        "The requested URL was not found on the server. If you entered"
        " the URL manually please check your spelling and try again."
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle.  For
    example `POST` if the resource is view only.  Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """

    code = 405
    description = "The method is not allowed for the requested URL."

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid HTTP methods that is used to
        build the ``Allow`` response header."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        # RFC 7231 requires an Allow header on 405 responses.
        if self.valid_methods:
            headers.append(("Allow", ", ".join(self.valid_methods)))
        return headers
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """

    #: HTTP status code for this error class.
    code = 406
    description = (
        "The resource identified by the request is only capable of"
        " generating response entities which have content"
        " characteristics not acceptable according to the accept"
        " headers sent in the request."
    )
class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """

    #: HTTP status code for this error class.
    code = 408
    description = (
        "The server closed the network connection because the browser"
        " didn't finish the request within the specified time."
    )
class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """

    #: HTTP status code for this error class.
    code = 409
    description = (
        "A conflict happened while processing the request. The"
        " resource might have been modified while the request was being"
        " processed."
    )
class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """

    #: HTTP status code for this error class.
    code = 410
    description = (
        "The requested URL is no longer available on this server and"
        " there is no forwarding address. If you followed a link from a"
        " foreign page, please contact the author of this page."
    )
class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """

    #: HTTP status code for this error class.
    code = 411
    description = (
        "A request with this method requires a valid <code>Content-"
        "Length</code> header."
    )
class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """

    #: HTTP status code for this error class.
    code = 412
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )
class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """

    #: HTTP status code for this error class.
    code = 413
    description = "The data value transmitted exceeds the capacity limit."
class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """

    #: HTTP status code for this error class.
    code = 414
    description = (
        "The length of the requested URL exceeds the capacity limit for"
        " this server. The request cannot be processed."
    )
class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """

    #: HTTP status code for this error class.
    code = 415
    description = (
        "The server does not support the media type transmitted in the request."
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    code = 416
    description = "The server cannot provide the requested range."

    def __init__(self, length=None, units="bytes", description=None):
        """Takes an optional `Content-Range` header value based on ``length``
        parameter.
        """
        HTTPException.__init__(self, description)
        self.length = length
        self.units = units

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        # Per RFC 7233, a 416 should carry "Content-Range: <units> */<length>".
        if self.length is not None:
            headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
        return headers
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """

    #: HTTP status code for this error class.
    code = 417
    description = "The server could not meet the requirements of the Expect header"
class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """

    #: HTTP status code for this error class (RFC 2324 April Fools' code).
    code = 418
    description = "This server is a teapot, not a coffee machine"
class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """

    #: HTTP status code for this error class.
    code = 422
    description = (
        "The request was well-formed but was unable to be followed due"
        " to semantic errors."
    )
class Locked(HTTPException):
    """*423* `Locked`

    Used if the resource that is being accessed is locked.
    """

    #: HTTP status code for this error class (WebDAV).
    code = 423
    description = "The resource that is being accessed is locked."
class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Used if the method could not be performed on the resource
    because the requested action depended on another action and that action failed.
    """

    #: HTTP status code for this error class (WebDAV).
    code = 424
    description = (
        "The method could not be performed on the resource because the"
        " requested action depended on another action and that action"
        " failed."
    )
class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """

    #: HTTP status code for this error class.
    code = 428
    description = (
        "This request is required to be conditional; try using"
        ' "If-Match" or "If-Unmodified-Since".'
    )
class _RetryAfter(HTTPException):
    """Shared base class for errors that may carry a ``Retry-After``
    header.

    ``retry_after`` may be an :class:`int` number of seconds or a
    :class:`~datetime.datetime`; when set, the corresponding header is
    appended to the response.
    """

    def __init__(self, description=None, response=None, retry_after=None):
        super(_RetryAfter, self).__init__(description, response)
        self.retry_after = retry_after

    def get_headers(self, environ=None):
        headers = super(_RetryAfter, self).get_headers(environ)
        if not self.retry_after:
            return headers
        if isinstance(self.retry_after, datetime):
            # Render absolute dates in HTTP date format.
            from .http import http_date

            headers.append(("Retry-After", http_date(self.retry_after)))
        else:
            headers.append(("Retry-After", str(self.retry_after)))
        return headers
class TooManyRequests(_RetryAfter):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives
    responses, and this request exceeds that rate. (The server may use
    any convenient method to identify users and their request rates).
    The server may include a "Retry-After" header to indicate how long
    the user should wait before retrying.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    #: HTTP status code for this error class.
    code = 429
    description = "This user has exceeded an allotted request count. Try again later."
class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """

    #: HTTP status code for this error class.
    code = 431
    description = "One or more header fields exceeds the maximum size."
class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    This status code indicates that the server is denying access to the
    resource as a consequence of a legal demand.
    """

    #: HTTP status code for this error class.
    code = 451
    description = "Unavailable for legal reasons."
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred.  This is a good fallback if an
    unknown error occurred in the dispatcher.

    .. versionchanged:: 1.0.0
        Added the :attr:`original_exception` attribute.
    """

    code = 500
    description = (
        "The server encountered an internal error and was unable to"
        " complete your request. Either the server is overloaded or"
        " there is an error in the application."
    )

    def __init__(self, description=None, response=None, original_exception=None):
        #: The original exception that caused this 500 error. Can be
        #: used by frameworks to provide context when handling
        #: unexpected errors.
        self.original_exception = original_exception
        super(InternalServerError, self).__init__(
            description=description, response=response
        )
class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """

    # NOTE: the class name intentionally shadows the ``NotImplemented``
    # builtin to match the HTTP status name; kept for API compatibility.
    code = 501
    description = "The server does not support the action requested by the browser."
class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """

    #: HTTP status code for this error class.
    code = 502
    description = (
        "The proxy server received an invalid response from an upstream server."
    )
class ServiceUnavailable(_RetryAfter):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily
    unavailable.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    #: HTTP status code for this error class.
    code = 503
    description = (
        "The server is temporarily unable to service your request due"
        " to maintenance downtime or capacity problems. Please try"
        " again later."
    )
class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`
    Status code you should return if a connection to an upstream server
    times out.
    """
    code = 504
    description = "The connection to an upstream server timed out."
class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`
    The server does not support the HTTP protocol version used in the request.
    """
    code = 505
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )
#: Mapping of status code -> default HTTPException subclass, populated by
#: :func:`_find_exceptions` below.
default_exceptions = {}
__all__ = ["HTTPException"]
def _find_exceptions():
    # Scan this module's globals for HTTPException subclasses and
    # register each under its status code, exporting it via __all__.
    for _name, obj in iteritems(globals()):
        try:
            is_http_exception = issubclass(obj, HTTPException)
        except TypeError:
            # Non-class globals raise TypeError in issubclass.
            is_http_exception = False
        if not is_http_exception or obj.code is None:
            continue
        __all__.append(obj.__name__)
        old_obj = default_exceptions.get(obj.code, None)
        # If a more specific subclass is already registered for this
        # code, keep it; otherwise (re)register this class.
        if old_obj is not None and issubclass(obj, old_obj):
            continue
        default_exceptions[obj.code] = obj
_find_exceptions()
# Remove the helper so it does not leak into the module namespace.
del _find_exceptions
class Aborter(object):
    """Callable that raises HTTP exceptions by status code.

    When passed a dict of ``code -> exception`` items it can be used as
    a callable that raises exceptions.  If the first argument to the
    callable is an integer it will be looked up in the mapping; if it is
    a WSGI application it will be raised wrapped in a proxy exception.
    All remaining arguments are forwarded to the exception constructor.
    """
    def __init__(self, mapping=None, extra=None):
        # Fall back to the module-wide code -> exception registry, and
        # copy whatever mapping we start from so later ``extra`` updates
        # never mutate the caller's dict.
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)
        if extra is not None:
            self.mapping.update(extra)
    def __call__(self, code, *args, **kwargs):
        # A bare non-integer "code" is treated as a response object and
        # raised wrapped in a proxy HTTPException.
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError("no exception for %r" % code)
        exc_class = self.mapping[code]
        raise exc_class(*args, **kwargs)
def abort(status, *args, **kwargs):
    """Raises an :py:exc:`HTTPException` for the given status code or WSGI
    application.
    If a status code is given, it will be looked up in the list of
    exceptions and will raise that exception. If passed a WSGI application,
    it will wrap it in a proxy WSGI exception and raise that::
        abort(404)  # 404 Not Found
        abort(Response('Hello World'))
    """
    # Delegates to the module-level Aborter singleton defined below.
    return _aborter(status, *args, **kwargs)
# Module-level singleton used by :func:`abort` above; built from the
# default_exceptions registry.
_aborter = Aborter()
#: An exception that is used to signal both a :exc:`KeyError` and a
#: :exc:`BadRequest`. Used by many of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/_compat.py
|
# flake8: noqa
# This whole file is full of lint errors
import functools
import operator
import sys
try:
    import builtins
except ImportError:
    # Python 2 spelling of the builtins module.
    import __builtin__ as builtins
# True when running under Python 2; selects the compat branch below.
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith("win")
# No-op used wherever a Py2/Py3 shim is unnecessary on one side.
_identity = lambda x: x
if PY2:
    # Python 2 definitions of the text/bytes shims.  Names like
    # ``unicode``, ``long``, ``xrange`` and ``buffer`` only exist on
    # Python 2; this branch never executes on Python 3.
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    # dict-iteration helpers: delegate to the Py2 iter* methods.
    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
    int_to_byte = chr
    iter_bytes = iter
    import collections as collections_abc
    # ``raise tp, value, tb`` is a syntax error on Py3, so it is hidden
    # inside exec() to keep this file importable there.
    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
    def fix_tuple_repr(obj):
        # Give namedtuple-like classes a field=value repr on Py2.
        def __repr__(self):
            cls = self.__class__
            return "%s(%s)" % (
                cls.__name__,
                ", ".join(
                    "%s=%r" % (field, self[index])
                    for index, field in enumerate(cls._fields)
                ),
            )
        obj.__repr__ = __repr__
        return obj
    def implements_iterator(cls):
        # Py2 iterators use ``next()`` instead of ``__next__()``.
        cls.next = cls.__next__
        del cls.__next__
        return cls
    def implements_to_string(cls):
        # On Py2 ``__str__`` must return bytes; route through __unicode__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
        return cls
    def native_string_result(func):
        # Wrap a text-returning function so it returns native (byte)
        # strings on Py2.
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode("utf-8")
        return functools.update_wrapper(wrapper, func)
    def implements_bool(cls):
        # Py2 truthiness hook is ``__nonzero__``, not ``__bool__``.
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls
    from itertools import imap, izip, ifilter
    range_type = xrange
    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO
    def make_literal_wrapper(reference):
        return _identity
    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup
    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return to_native(s)
        except UnicodeError:
            return s
    wsgi_get_bytes = _identity
    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        return s.decode(charset, errors)
    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)
    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError("Expected bytes")
    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)
else:
    # Python 3 definitions of the same shim names.
    unichr = chr
    text_type = str
    string_types = (str,)
    integer_types = (int,)
    # dict-iteration helpers: wrap the Py3 view objects in iter().
    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
    # Convert an int in [0, 255] to a single byte.
    int_to_byte = operator.methodcaller("to_bytes", 1, "big")
    iter_bytes = functools.partial(map, int_to_byte)
    import collections.abc as collections_abc
    def reraise(tp, value, tb=None):
        # Re-raise preserving the given traceback (Py3 spelling).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    # The Py2 class decorators are all no-ops on Py3.
    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity
    imap = map
    izip = zip
    ifilter = filter
    range_type = range
    from io import StringIO, BytesIO
    NativeStringIO = StringIO
    _latin1_encode = operator.methodcaller("encode", "latin1")
    def make_literal_wrapper(reference):
        # Return a converter that coerces literals to the same type
        # (str or bytes) as the reference value.
        if isinstance(reference, text_type):
            return _identity
        return _latin1_encode
    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError(
                    "Cannot mix str and bytes arguments (got %s)" % repr(tup)
                )
        return tup
    try_coerce_native = _identity
    wsgi_get_bytes = _latin1_encode
    # WSGI environ strings are latin-1-wrapped bytes on Py3 (PEP 3333);
    # these two helpers translate to/from the real charset.
    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        return s.encode("latin1").decode(charset, errors)
    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
        if isinstance(s, text_type):
            s = s.encode(charset)
        return s.decode("latin1", errors)
    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):  # noqa
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError("Expected bytes")
    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)
def to_unicode(
    x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False
):
    """Coerce *x* to a text string, or pass ``None`` through unchanged.

    Bytes are decoded with *charset*/*errors*; anything else goes through
    the text constructor.  If *allow_none_charset* is true and *charset*
    is ``None``, bytes input is returned undecoded.
    """
    if x is None:
        return None
    if not isinstance(x, bytes):
        # Non-bytes input (numbers, arbitrary objects, text) is coerced
        # via the text constructor, a no-op for strings.
        return text_type(x)
    if allow_none_charset and charset is None:
        # Caller explicitly opted in to receiving raw bytes back.
        return x
    return x.decode(charset, errors)
try:
    from os import fspath
except ImportError:
    # Python < 3.6
    # https://www.python.org/dev/peps/pep-0519/#backwards-compatibility
    def fspath(path):
        # Unwrap os.PathLike objects; pass plain strings through.
        return path.__fspath__() if hasattr(path, "__fspath__") else path
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/datastructures.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import mimetypes
import re
from copy import deepcopy
from itertools import repeat
from . import exceptions
from ._compat import BytesIO
from ._compat import collections_abc
from ._compat import fspath
from ._compat import integer_types
from ._compat import iteritems
from ._compat import iterkeys
from ._compat import iterlists
from ._compat import itervalues
from ._compat import make_literal_wrapper
from ._compat import PY2
from ._compat import string_types
from ._compat import text_type
from ._compat import to_native
from ._internal import _missing
from .filesystem import get_filesystem_encoding
def is_immutable(self):
    """Raise :exc:`TypeError`; shared helper for all immutable mixins."""
    cls_name = self.__class__.__name__
    raise TypeError("%r objects are immutable" % cls_name)
def iter_multi_items(mapping):
    """Iterates over the items of a mapping yielding keys and values
    without dropping any from more complex structures.
    """
    if isinstance(mapping, MultiDict):
        # MultiDict already knows how to expose repeated keys.
        for pair in iteritems(mapping, multi=True):
            yield pair
    elif isinstance(mapping, dict):
        for key, value in iteritems(mapping):
            # A list/tuple value stands for several values of one key.
            if isinstance(value, (tuple, list)):
                for single in value:
                    yield key, single
            else:
                yield key, value
    else:
        # Assume an iterable of (key, value) pairs.
        for pair in mapping:
            yield pair
def native_itermethods(names):
    # Class decorator factory.  On Python 3 it is a no-op; on Python 2 it
    # rewrites each named method so that e.g. ``items`` returns a list,
    # ``iteritems`` returns the original iterator, and ``viewitems``
    # returns a live ViewItems view -- matching dict's Py2 API.
    if not PY2:
        return lambda x: x
    def setviewmethod(cls, name):
        viewmethod_name = "view%s" % name
        repr_name = "view_%s" % name
        def viewmethod(self, *a, **kw):
            return ViewItems(self, name, repr_name, *a, **kw)
        viewmethod.__name__ = viewmethod_name
        viewmethod.__doc__ = "`%s()` object providing a view on %s" % (
            viewmethod_name,
            name,
        )
        setattr(cls, viewmethod_name, viewmethod)
    def setitermethod(cls, name):
        # The class defines ``name`` as a generator; expose it under
        # ``iter<name>`` and replace ``name`` with a list-returning wrapper.
        itermethod = getattr(cls, name)
        setattr(cls, "iter%s" % name, itermethod)
        def listmethod(self, *a, **kw):
            return list(itermethod(self, *a, **kw))
        listmethod.__name__ = name
        listmethod.__doc__ = "Like :py:meth:`iter%s`, but returns a list." % name
        setattr(cls, name, listmethod)
    def wrap(cls):
        for name in names:
            setitermethod(cls, name)
            setviewmethod(cls, name)
        return cls
    return wrap
class ImmutableListMixin(object):
    """Makes a :class:`list` immutable.
    .. versionadded:: 0.5
    :private:
    """
    # Cached result of __hash__; computed lazily on first call.
    _hash_cache = None
    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(tuple(self))
        return rv
    def __reduce_ex__(self, protocol):
        # Pickle as a plain (mutable) list of the same items.
        return type(self), (list(self),)
    # Every mutating list method below raises TypeError via is_immutable.
    def __delitem__(self, key):
        is_immutable(self)
    def __iadd__(self, other):
        is_immutable(self)
    __imul__ = __iadd__
    def __setitem__(self, key, value):
        is_immutable(self)
    def append(self, item):
        is_immutable(self)
    remove = append
    def extend(self, iterable):
        is_immutable(self)
    def insert(self, pos, value):
        is_immutable(self)
    def pop(self, index=-1):
        is_immutable(self)
    def reverse(self):
        is_immutable(self)
    def sort(self, cmp=None, key=None, reverse=None):
        is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
    """An immutable :class:`list`.
    .. versionadded:: 0.5
    :private:
    """
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))
class ImmutableDictMixin(object):
    """Makes a :class:`dict` immutable.
    .. versionadded:: 0.5
    :private:
    """
    # Cached result of __hash__; computed lazily on first call.
    _hash_cache = None
    @classmethod
    def fromkeys(cls, keys, value=None):
        # Build via __new__/__init__ so subclass constructors that do not
        # accept a mapping argument still work.
        instance = super(cls, cls).__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance
    def __reduce_ex__(self, protocol):
        # Pickle as a plain (mutable) dict of the same items.
        return type(self), (dict(self),)
    def _iter_hashitems(self):
        # Hook for subclasses (e.g. multi dicts) to hash multi-values.
        return iteritems(self)
    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return rv
    # Every mutating dict method below raises TypeError via is_immutable.
    def setdefault(self, key, default=None):
        is_immutable(self)
    def update(self, *args, **kwargs):
        is_immutable(self)
    def pop(self, key, default=None):
        is_immutable(self)
    def popitem(self):
        is_immutable(self)
    def __setitem__(self, key, value):
        is_immutable(self)
    def __delitem__(self, key):
        is_immutable(self)
    def clear(self):
        is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Makes a :class:`MultiDict` immutable.
    .. versionadded:: 0.5
    :private:
    """
    def __reduce_ex__(self, protocol):
        # Pickle as the full (key, value) pair list so repeated keys survive.
        return type(self), (list(iteritems(self, multi=True)),)
    def _iter_hashitems(self):
        # Hash over all values of every key, not just the first.
        return iteritems(self, multi=True)
    # The MultiDict-specific mutators also raise TypeError.
    def add(self, key, value):
        is_immutable(self)
    def popitemlist(self):
        is_immutable(self)
    def poplist(self, key):
        is_immutable(self)
    def setlist(self, key, new_list):
        is_immutable(self)
    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
class UpdateDictMixin(object):
    """Makes dicts call `self.on_update` on modifications.
    .. versionadded:: 0.5
    :private:
    """
    # Callback invoked with ``self`` after any mutation; None disables it.
    on_update = None
    def calls_update(name):  # noqa: B902
        # Factory: wrap the named dict method so on_update fires after it.
        def oncall(self, *args, **kw):
            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
            if self.on_update is not None:
                self.on_update(self)
            return rv
        oncall.__name__ = name
        return oncall
    def setdefault(self, key, default=None):
        # Only fires on_update when the key was actually inserted.
        modified = key not in self
        rv = super(UpdateDictMixin, self).setdefault(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv
    def pop(self, key, default=_missing):
        # Only fires on_update when the key actually existed.
        modified = key in self
        if default is _missing:
            rv = super(UpdateDictMixin, self).pop(key)
        else:
            rv = super(UpdateDictMixin, self).pop(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv
    __setitem__ = calls_update("__setitem__")
    __delitem__ = calls_update("__delitem__")
    clear = calls_update("clear")
    popitem = calls_update("popitem")
    update = calls_update("update")
    # The factory is only needed at class-creation time.
    del calls_update
class TypeConversionDict(dict):
    """A regular dict whose :meth:`get` can also convert values.

    :class:`MultiDict` and :class:`CombinedMultiDict` subclass this and
    provide the same feature.

    .. versionadded:: 0.5
    """
    def get(self, key, default=None, type=None):
        """Return the value for *key*, optionally converted by *type*.

        If *type* is a callable it is applied to the value; a
        :exc:`ValueError` raised by it makes this behave as if the key
        were missing:

        >>> d = TypeConversionDict(foo='42', bar='blub')
        >>> d.get('foo', type=int)
        42
        >>> d.get('bar', -1, type=int)
        -1

        :param key: The key to be looked up.
        :param default: Returned when the key is missing or conversion
                        fails; ``None`` if not given.
        :param type: A callable used to cast the value; a
                     :exc:`ValueError` from it yields *default*.
        """
        try:
            value = self[key]
        except KeyError:
            return default
        if type is None:
            return value
        try:
            return type(value)
        except ValueError:
            return default
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
    """Works like a :class:`TypeConversionDict` but does not support
    modifications.
    .. versionadded:: 0.5
    """
    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return TypeConversionDict(self)
    def __copy__(self):
        # Immutable, so a "copy" can safely be the object itself.
        return self
class ViewItems(object):
    """Lazy ``view*`` object over one of a mapping's iteration methods.

    Stores the mapping plus the method name and re-invokes the method on
    every iteration, so the view always reflects the current contents.
    """
    def __init__(self, multi_dict, method, repr_name, *a, **kw):
        self._mapping = multi_dict
        self._method = method
        self._repr_name = repr_name
        self._args = a
        self._kwargs = kw
    def _fetch(self):
        # Re-resolve the bound method each time for a live view.
        bound = getattr(self._mapping, self._method)
        return bound(*self._args, **self._kwargs)
    def __repr__(self):
        return "%s(%r)" % (self._repr_name, list(self._fetch()))
    def __iter__(self):
        return iter(self._fetch())
# On Python 2 the decorator rewrites keys/values/items/lists/listvalues
# (defined below as generators) into list-returning methods plus iter*/view*
# variants; on Python 3 it is a no-op and the generators are used directly.
@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
class MultiDict(TypeConversionDict):
    """A :class:`MultiDict` is a dictionary subclass customized to deal with
    multiple values for the same key which is for example used by the parsing
    functions in the wrappers. This is necessary because some HTML form
    elements pass multiple values for the same key.
    :class:`MultiDict` implements all standard dictionary methods.
    Internally, it saves all values for a key as a list, but the standard dict
    access methods will only return the first value for a key. If you want to
    gain access to the other values, too, you have to use the `list` methods as
    explained below.
    Basic Usage:
    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
    >>> d
    MultiDict([('a', 'b'), ('a', 'c')])
    >>> d['a']
    'b'
    >>> d.getlist('a')
    ['b', 'c']
    >>> 'a' in d
    True
    It behaves like a normal dict thus all dict functions will only return the
    first value when multiple values for one key are found.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    A :class:`MultiDict` can be constructed from an iterable of
    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
    onwards some keyword parameters.
    :param mapping: the initial value for the :class:`MultiDict`. Either a
                    regular dict, an iterable of ``(key, value)`` tuples
                    or `None`.
    """
    # Internal invariant: every dict value is a non-empty list of the
    # key's values; dict.* methods are used directly to reach it.
    def __init__(self, mapping=None):
        if isinstance(mapping, MultiDict):
            # Copy the value lists so the two dicts do not share them.
            dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
        elif isinstance(mapping, dict):
            tmp = {}
            for key, value in iteritems(mapping):
                if isinstance(value, (tuple, list)):
                    if len(value) == 0:
                        continue
                    value = list(value)
                else:
                    value = [value]
                tmp[key] = value
            dict.__init__(self, tmp)
        else:
            # Iterable of (key, value) pairs; repeated keys accumulate.
            tmp = {}
            for key, value in mapping or ():
                tmp.setdefault(key, []).append(value)
            dict.__init__(self, tmp)
    def __getstate__(self):
        return dict(self.lists())
    def __setstate__(self, value):
        dict.clear(self)
        dict.update(self, value)
    def __getitem__(self, key):
        """Return the first data value for this key;
        raises KeyError if not found.
        :param key: The key to be looked up.
        :raise KeyError: if the key does not exist.
        """
        if key in self:
            lst = dict.__getitem__(self, key)
            if len(lst) > 0:
                return lst[0]
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        """Like :meth:`add` but removes an existing key first.
        :param key: the key for the value.
        :param value: the value to set.
        """
        dict.__setitem__(self, key, [value])
    def add(self, key, value):
        """Adds a new value for the key.
        .. versionadded:: 0.6
        :param key: the key for the value.
        :param value: the value to add.
        """
        dict.setdefault(self, key, []).append(value)
    def getlist(self, key, type=None):
        """Return the list of items for a given key. If that key is not in the
        `MultiDict`, the return value will be an empty list. Just as `get`
        `getlist` accepts a `type` parameter. All items will be converted
        with the callable defined there.
        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`. If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        """
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return list(rv)
        result = []
        for item in rv:
            try:
                result.append(type(item))
            except ValueError:
                pass
        return result
    def setlist(self, key, new_list):
        """Remove the old values for a key and add new ones. Note that the list
        you pass the values in will be shallow-copied before it is inserted in
        the dictionary.
        >>> d = MultiDict()
        >>> d.setlist('foo', ['1', '2'])
        >>> d['foo']
        '1'
        >>> d.getlist('foo')
        ['1', '2']
        :param key: The key for which the values are set.
        :param new_list: An iterable with the new values for the key. Old values
                         are removed first.
        """
        dict.__setitem__(self, key, list(new_list))
    def setdefault(self, key, default=None):
        """Returns the value for the key if it is in the dict, otherwise it
        returns `default` and sets that value for `key`.
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key is not
                        in the dict. If not further specified it's `None`.
        """
        if key not in self:
            self[key] = default
        else:
            default = self[key]
        return default
    def setlistdefault(self, key, default_list=None):
        """Like `setdefault` but sets multiple values. The list returned
        is not a copy, but the list that is actually used internally. This
        means that you can put new values into the dict by appending items
        to the list:
        >>> d = MultiDict({"foo": 1})
        >>> d.setlistdefault("foo").extend([2, 3])
        >>> d.getlist("foo")
        [1, 2, 3]
        :param key: The key to be looked up.
        :param default_list: An iterable of default values. It is either copied
                             (in case it was a list) or converted into a list
                             before returned.
        :return: a :class:`list`
        """
        if key not in self:
            default_list = list(default_list or ())
            dict.__setitem__(self, key, default_list)
        else:
            # Return the internal list itself so appends are visible.
            default_list = dict.__getitem__(self, key)
        return default_list
    def items(self, multi=False):
        """Return an iterator of ``(key, value)`` pairs.
        :param multi: If set to `True` the iterator returned will have a pair
                      for each value of each key. Otherwise it will only
                      contain pairs for the first value of each key.
        """
        for key, values in iteritems(dict, self):
            if multi:
                for value in values:
                    yield key, value
            else:
                yield key, values[0]
    def lists(self):
        """Return a iterator of ``(key, values)`` pairs, where values is the list
        of all values associated with the key."""
        for key, values in iteritems(dict, self):
            yield key, list(values)
    def keys(self):
        return iterkeys(dict, self)
    __iter__ = keys
    def values(self):
        """Returns an iterator of the first value on every key's value list."""
        for values in itervalues(dict, self):
            yield values[0]
    def listvalues(self):
        """Return an iterator of all values associated with a key. Zipping
        :meth:`keys` and this is the same as calling :meth:`lists`:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> zip(d.keys(), d.listvalues()) == d.lists()
        True
        """
        return itervalues(dict, self)
    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self)
    def deepcopy(self, memo=None):
        """Return a deep copy of this object."""
        return self.__class__(deepcopy(self.to_dict(flat=False), memo))
    def to_dict(self, flat=True):
        """Return the contents as regular dict. If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.
        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it. Otherwise it will only
                     contain the first value for each key.
        :return: a :class:`dict`
        """
        if flat:
            return dict(iteritems(self))
        return dict(self.lists())
    def update(self, other_dict):
        """update() extends rather than replaces existing key lists:
        >>> a = MultiDict({'x': 1})
        >>> b = MultiDict({'x': 2, 'y': 3})
        >>> a.update(b)
        >>> a
        MultiDict([('y', 3), ('x', 1), ('x', 2)])
        If the value list for a key in ``other_dict`` is empty, no new values
        will be added to the dict and the key will not be created:
        >>> x = {'empty_list': []}
        >>> y = MultiDict()
        >>> y.update(x)
        >>> y
        MultiDict([])
        """
        for key, value in iter_multi_items(other_dict):
            MultiDict.add(self, key, value)
    def pop(self, key, default=_missing):
        """Pop the first item for a list on the dict. Afterwards the
        key is removed from the dict, so additional values are discarded:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> d.pop("foo")
        1
        >>> "foo" in d
        False
        :param key: the key to pop.
        :param default: if provided the value to return if the key was
                        not in the dictionary.
        """
        try:
            lst = dict.pop(self, key)
            if len(lst) == 0:
                raise exceptions.BadRequestKeyError(key)
            return lst[0]
        except KeyError:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(key)
    def popitem(self):
        """Pop an item from the dict."""
        try:
            item = dict.popitem(self)
            if len(item[1]) == 0:
                raise exceptions.BadRequestKeyError(item)
            return (item[0], item[1][0])
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0])
    def poplist(self, key):
        """Pop the list for a key from the dict. If the key is not in the dict
        an empty list is returned.
        .. versionchanged:: 0.5
           If the key does no longer exist a list is returned instead of
           raising an error.
        """
        return dict.pop(self, key, [])
    def popitemlist(self):
        """Pop a ``(key, list)`` tuple from the dict."""
        try:
            return dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0])
    def __copy__(self):
        return self.copy()
    def __deepcopy__(self, memo):
        return self.deepcopy(memo=memo)
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
    """Wraps values in the :class:`OrderedMultiDict`. This makes it
    possible to keep an order over multiple different keys. It requires
    a lot of extra memory and slows down access a lot, but makes it
    possible to access elements in O(1) and iterate in O(n).
    """
    # Doubly linked list node: prev/next chain all buckets in insertion
    # order across all keys of the owning OrderedMultiDict.
    __slots__ = ("prev", "key", "value", "next")
    def __init__(self, omd, key, value):
        # Append self to the tail of omd's bucket chain.
        self.prev = omd._last_bucket
        self.key = key
        self.value = value
        self.next = None
        if omd._first_bucket is None:
            omd._first_bucket = self
        if omd._last_bucket is not None:
            omd._last_bucket.next = self
        omd._last_bucket = self
    def unlink(self, omd):
        # Splice self out of the chain, fixing omd's head/tail pointers.
        if self.prev:
            self.prev.next = self.next
        if self.next:
            self.next.prev = self.prev
        if omd._first_bucket is self:
            omd._first_bucket = self.next
        if omd._last_bucket is self:
            omd._last_bucket = self.prev
# See the note on MultiDict: this decorator only rewrites the iteration
# methods on Python 2; it is a no-op on Python 3.
@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
class OrderedMultiDict(MultiDict):
    """Works like a regular :class:`MultiDict` but preserves the
    order of the fields. To convert the ordered multi dict into a
    list you can use the :meth:`items` method and pass it ``multi=True``.
    In general an :class:`OrderedMultiDict` is an order of magnitude
    slower than a :class:`MultiDict`.
    .. admonition:: note
       Due to a limitation in Python you cannot convert an ordered
       multi dict into a regular dict by using ``dict(multidict)``.
       Instead you have to use the :meth:`to_dict` method, otherwise
       the internal bucket objects are exposed.
    """
    # Internal invariant: dict values are lists of _omd_bucket nodes;
    # the buckets also form a doubly linked list recording global order.
    def __init__(self, mapping=None):
        dict.__init__(self)
        self._first_bucket = self._last_bucket = None
        if mapping is not None:
            OrderedMultiDict.update(self, mapping)
    def __eq__(self, other):
        if not isinstance(other, MultiDict):
            return NotImplemented
        if isinstance(other, OrderedMultiDict):
            # Ordered vs ordered: compare full item streams pairwise.
            iter1 = iteritems(self, multi=True)
            iter2 = iteritems(other, multi=True)
            try:
                for k1, v1 in iter1:
                    k2, v2 = next(iter2)
                    if k1 != k2 or v1 != v2:
                        return False
            except StopIteration:
                return False
            try:
                next(iter2)
            except StopIteration:
                return True
            return False
        # Ordered vs plain MultiDict: order is ignored, lists must match.
        if len(self) != len(other):
            return False
        for key, values in iterlists(self):
            if other.getlist(key) != values:
                return False
        return True
    __hash__ = None
    def __ne__(self, other):
        return not self.__eq__(other)
    def __reduce_ex__(self, protocol):
        return type(self), (list(iteritems(self, multi=True)),)
    def __getstate__(self):
        return list(iteritems(self, multi=True))
    def __setstate__(self, values):
        dict.clear(self)
        for key, value in values:
            self.add(key, value)
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)[0].value
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        self.poplist(key)
        self.add(key, value)
    def __delitem__(self, key):
        self.pop(key)
    def keys(self):
        return (key for key, value in iteritems(self))
    __iter__ = keys
    def values(self):
        return (value for key, value in iteritems(self))
    def items(self, multi=False):
        # Walk the bucket chain so pairs come out in insertion order.
        ptr = self._first_bucket
        if multi:
            while ptr is not None:
                yield ptr.key, ptr.value
                ptr = ptr.next
        else:
            returned_keys = set()
            while ptr is not None:
                if ptr.key not in returned_keys:
                    returned_keys.add(ptr.key)
                    yield ptr.key, ptr.value
                ptr = ptr.next
    def lists(self):
        returned_keys = set()
        ptr = self._first_bucket
        while ptr is not None:
            if ptr.key not in returned_keys:
                yield ptr.key, self.getlist(ptr.key)
                returned_keys.add(ptr.key)
            ptr = ptr.next
    def listvalues(self):
        for _key, values in iterlists(self):
            yield values
    def add(self, key, value):
        # The bucket links itself into the chain on construction.
        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
    def getlist(self, key, type=None):
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return [x.value for x in rv]
        result = []
        for item in rv:
            try:
                result.append(type(item.value))
            except ValueError:
                pass
        return result
    def setlist(self, key, new_list):
        self.poplist(key)
        for value in new_list:
            self.add(key, value)
    def setlistdefault(self, key, default_list=None):
        raise TypeError("setlistdefault is unsupported for ordered multi dicts")
    def update(self, mapping):
        for key, value in iter_multi_items(mapping):
            OrderedMultiDict.add(self, key, value)
    def poplist(self, key):
        # Buckets must be unlinked from the chain, not just dropped.
        buckets = dict.pop(self, key, ())
        for bucket in buckets:
            bucket.unlink(self)
        return [x.value for x in buckets]
    def pop(self, key, default=_missing):
        try:
            buckets = dict.pop(self, key)
        except KeyError:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(key)
        for bucket in buckets:
            bucket.unlink(self)
        return buckets[0].value
    def popitem(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0])
        for bucket in buckets:
            bucket.unlink(self)
        return key, buckets[0].value
    def popitemlist(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0])
        for bucket in buckets:
            bucket.unlink(self)
        return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
    """Serialize *value* plus keyword options into a header string,
    mapping keyword-style underscores back to header-style dashes."""
    options = {k.replace("_", "-"): v for k, v in kw.items()}
    return dump_options_header(value, options)
def _unicodify_header_value(value):
    # Header values are stored as text: bytes are decoded as latin-1
    # (per the WSGI spec), anything else is coerced via the text type.
    if isinstance(value, bytes):
        value = value.decode("latin-1")
    if not isinstance(value, text_type):
        value = text_type(value)
    return value
@native_itermethods(["keys", "values", "items"])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
    """Compare two header objects: same class, same case-insensitive
    set of ``(key, value)`` tuples (ordering and duplicates collapse
    into the set comparison)."""

    def lowered(item):
        return (item[0].lower(),) + item[1:]

    if other.__class__ is not self.__class__:
        return False
    return set(map(lowered, other._list)) == set(map(lowered, self._list))

# Mutable structure: explicitly unhashable.
__hash__ = None
def __ne__(self, other):
    """Inverse of :meth:`__eq__`; required explicitly on Python 2."""
    result = self.__eq__(other)
    return not result
def get(self, key, default=None, type=None, as_bytes=False):
    """Return the default value if the requested data doesn't exist.

    If `type` is provided and is a callable it should convert the value,
    return it or raise a :exc:`ValueError` if that is not possible. In
    this case the function will return the default as if the value was not
    found:

    >>> d = Headers([('Content-Length', '42')])
    >>> d.get('Content-Length', type=int)
    42

    If a headers object is bound you must not add unicode strings
    because no encoding takes place.

    .. versionadded:: 0.9
       Added support for `as_bytes`.

    :param key: The key to be looked up.
    :param default: The default value to be returned if the key can't
                    be looked up. If not further specified `None` is
                    returned.
    :param type: A callable that is used to cast the value in the
                 :class:`Headers`. If a :exc:`ValueError` is raised
                 by this callable the default value is returned.
    :param as_bytes: return bytes instead of unicode strings.
    """
    try:
        # _get_mode=True makes a miss raise plain KeyError (cheaper to
        # catch here than the BadRequestKeyError subclass).
        rv = self.__getitem__(key, _get_mode=True)
    except KeyError:
        return default
    if as_bytes:
        # Stored values are unicode; headers are latin1 on the wire.
        rv = rv.encode("latin1")
    if type is None:
        return rv
    try:
        return type(rv)
    except ValueError:
        # Failed conversion is treated the same as a missing key.
        return default
def getlist(self, key, type=None, as_bytes=False):
    """Return the list of items for a given key. If that key is not in the
    :class:`Headers`, the return value will be an empty list. Just as
    :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
    be converted with the callable defined there.

    .. versionadded:: 0.9
       Added support for `as_bytes`.

    :param key: The key to be looked up.
    :param type: A callable that is used to cast the value in the
                 :class:`Headers`. If a :exc:`ValueError` is raised
                 by this callable the value will be removed from the list.
    :return: a :class:`list` of all the values for the key.
    :param as_bytes: return bytes instead of unicode strings.
    """
    wanted = key.lower()
    values = []
    for name, value in self:
        # Header names match case-insensitively.
        if name.lower() != wanted:
            continue
        if as_bytes:
            value = value.encode("latin1")
        if type is not None:
            try:
                value = type(value)
            except ValueError:
                # Values that fail conversion are dropped, not defaulted.
                continue
        values.append(value)
    return values
def get_all(self, name):
    """Return a list of all the values for the named field.

    Compatibility alias for the :mod:`wsgiref`
    :meth:`~wsgiref.headers.Headers.get_all` method; simply delegates
    to :meth:`getlist`.
    """
    return self.getlist(name)
def items(self, lower=False):
    """Yield ``(key, value)`` pairs, lowercasing the keys when *lower*
    is true."""
    for key, value in self:
        yield (key.lower() if lower else key), value
def keys(self, lower=False):
    # Yield only the header names; ``iteritems`` is the py2/py3 compat
    # helper that dispatches to items()/iteritems().
    for key, _ in iteritems(self, lower):
        yield key
def values(self):
    # Yield only the header values, in list order (duplicates included).
    for _, value in iteritems(self):
        yield value
def extend(self, *args, **kwargs):
    """Extend headers in this object with items from another object
    containing header items as well as keyword arguments.

    To replace existing keys instead of extending, use
    :meth:`update` instead.

    If provided, the first argument can be another :class:`Headers`
    object, a :class:`MultiDict`, :class:`dict`, or iterable of
    pairs.

    .. versionchanged:: 1.0
        Support :class:`MultiDict`. Allow passing ``kwargs``.
    """
    if len(args) > 1:
        raise TypeError("update expected at most 1 arguments, got %d" % len(args))
    if args:
        # iter_multi_items flattens dicts/MultiDicts into (key, value)
        # pairs, preserving multiple values per key.
        for key, value in iter_multi_items(args[0]):
            self.add(key, value)
    for key, value in iter_multi_items(kwargs):
        self.add(key, value)
def __delitem__(self, key, _index_operation=True):
    """Delete by index/slice, or drop every header whose name matches
    *key* case-insensitively (no error when nothing matches)."""
    if _index_operation and isinstance(key, (integer_types, slice)):
        del self._list[key]
        return
    wanted = key.lower()
    # Rebuild in place so external references to the list stay valid.
    self._list[:] = [(k, v) for k, v in self._list if k.lower() != wanted]
def remove(self, key):
    """Remove a key.

    :param key: The key to be removed.
    """
    # _index_operation=False forces name-based removal even for keys
    # that happen to be integers.
    return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
    """Removes and returns a key or index.

    :param key: The key to be popped.  If this is an integer the item at
                that position is removed, if it's a string the value for
                that key is.  If the key is omitted or `None` the last
                item is removed.
    :return: an item.
    """
    if key is None:
        return self._list.pop()
    if isinstance(key, integer_types):
        return self._list.pop(key)
    try:
        value = self[key]
    except KeyError:
        # ``_missing`` is a sentinel so ``None`` is a usable default.
        if default is _missing:
            raise
        return default
    self.remove(key)
    return value
def popitem(self):
    """Removes a key or index and returns a (key, value) item."""
    # Delegates to pop() with no key, which removes the last tuple.
    return self.pop()
def __contains__(self, key):
    """Check if a key is present."""
    try:
        self.__getitem__(key, _get_mode=True)
    except KeyError:
        return False
    else:
        return True

# Python 2 dict-style alias.
has_key = __contains__
def __iter__(self):
    """Yield ``(key, value)`` tuples."""
    # Iterates the raw list, so duplicates and insertion order survive.
    return iter(self._list)
def __len__(self):
    # Number of header tuples, counting duplicate names separately.
    return len(self._list)
def add(self, _key, _value, **kw):
    """Add a new header tuple to the list.

    Keyword arguments can specify additional parameters for the header
    value, with underscores converted to dashes::

    >>> d = Headers()
    >>> d.add('Content-Type', 'text/plain')
    >>> d.add('Content-Disposition', 'attachment', filename='foo.png')

    The keyword argument dumping uses :func:`dump_options_header`
    behind the scenes.

    .. versionadded:: 0.4.1
        keyword arguments were added for :mod:`wsgiref` compatibility.
    """
    if kw:
        # Folds kwargs into the value as ``; key=value`` options.
        _value = _options_header_vkw(_value, kw)
    # Bytes are latin1-decoded so the internal storage is always text.
    _key = _unicodify_header_value(_key)
    _value = _unicodify_header_value(_value)
    # Reject newlines to prevent header-injection.
    self._validate_value(_value)
    self._list.append((_key, _value))
def _validate_value(self, value):
    """Reject non-text values and values containing CR/LF, which would
    allow HTTP header injection."""
    if not isinstance(value, text_type):
        raise TypeError("Value should be unicode.")
    for forbidden in (u"\n", u"\r"):
        if forbidden in value:
            raise ValueError(
                "Detected newline in header value. This is "
                "a potential security problem"
            )
def add_header(self, _key, _value, **_kw):
    """Add a new header tuple to the list.

    An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
    :meth:`~wsgiref.headers.Headers.add_header` method.
    """
    self.add(_key, _value, **_kw)
def clear(self):
    """Clears all headers."""
    # Slice-delete keeps the same list object alive for any observers.
    del self._list[:]
def set(self, _key, _value, **kw):
    """Remove all header tuples for `key` and add a new one.  The newly
    added key either appears at the end of the list if there was no
    entry or replaces the first one.

    Keyword arguments can specify additional parameters for the header
    value, with underscores converted to dashes.  See :meth:`add` for
    more information.

    .. versionchanged:: 0.6.1
       :meth:`set` now accepts the same arguments as :meth:`add`.

    :param key: The key to be inserted.
    :param value: The value to be inserted.
    """
    if kw:
        _value = _options_header_vkw(_value, kw)
    _key = _unicodify_header_value(_key)
    _value = _unicodify_header_value(_value)
    self._validate_value(_value)
    if not self._list:
        self._list.append((_key, _value))
        return
    # Walk the list through a shared iterator: after the break, the
    # iterator is positioned *after* the replaced entry, so the slice
    # below can filter the remaining duplicates without rescanning.
    listiter = iter(self._list)
    ikey = _key.lower()
    for idx, (old_key, _old_value) in enumerate(listiter):
        if old_key.lower() == ikey:
            # replace first occurrence
            self._list[idx] = (_key, _value)
            break
    else:
        # No existing entry: append and keep the rest untouched.
        self._list.append((_key, _value))
        return
    # Drop any further occurrences of the key past the replaced slot.
    self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]
def setlist(self, key, values):
    """Remove any existing values for a header and add new ones.

    :param key: The header key to set.
    :param values: An iterable of values to set for the key.

    .. versionadded:: 1.0
    """
    # Materialize first: a generator object is always truthy, so
    # testing ``values`` directly would send an *empty* iterator into
    # ``next()`` and raise StopIteration instead of removing the key.
    values = list(values)
    if values:
        self.set(key, values[0])
        for value in values[1:]:
            self.add(key, value)
    else:
        self.remove(key)
def setdefault(self, key, default):
    """Return the first value for the key if it is in the headers,
    otherwise set the header to the value given by ``default`` and
    return that.

    :param key: The header key to get.
    :param default: The value to set for the key if it is not in the
                    headers.
    """
    if key in self:
        return self[key]
    self.set(key, default)
    return default
def setlistdefault(self, key, default):
    """Return the list of values for the key if it is in the
    headers, otherwise set the header to the list of values given
    by ``default`` and return that.

    Unlike :meth:`MultiDict.setlistdefault`, modifying the returned
    list will not affect the headers.

    :param key: The header key to get.
    :param default: An iterable of values to set for the key if it
                    is not in the headers.

    .. versionadded:: 1.0
    """
    if key not in self:
        self.setlist(key, default)
    # Always re-read through getlist so the caller gets a fresh list.
    return self.getlist(key)
def __setitem__(self, key, value):
    """Like :meth:`set` but also supports index/slice based setting.

    For integer keys a single ``(key, value)`` tuple is expected; for
    slices an iterable of such tuples.  Values are normalized to text
    and validated against header injection before assignment.
    """
    if isinstance(key, (slice, integer_types)):
        if isinstance(key, integer_types):
            value = [value]
        value = [
            (_unicodify_header_value(k), _unicodify_header_value(v))
            for (k, v) in value
        ]
        # Validate with a plain loop; the original used a list
        # comprehension purely for its side effects.
        for _, v in value:
            self._validate_value(v)
        if isinstance(key, integer_types):
            self._list[key] = value[0]
        else:
            self._list[key] = value
    else:
        self.set(key, value)
def update(self, *args, **kwargs):
    """Replace headers in this object with items from another
    headers object and keyword arguments.

    To extend existing keys instead of replacing, use :meth:`extend`
    instead.

    If provided, the first argument can be another :class:`Headers`
    object, a :class:`MultiDict`, :class:`dict`, or iterable of
    pairs.

    .. versionadded:: 1.0
    """
    if len(args) > 1:
        raise TypeError("update expected at most 1 arguments, got %d" % len(args))
    if args:
        mapping = args[0]
        if isinstance(mapping, (Headers, MultiDict)):
            # Multi-value mappings replace the whole value list per key.
            for key in mapping.keys():
                self.setlist(key, mapping.getlist(key))
        elif isinstance(mapping, dict):
            for key, value in iteritems(mapping):
                # A list/tuple value means "set all of these values".
                if isinstance(value, (list, tuple)):
                    self.setlist(key, value)
                else:
                    self.set(key, value)
        else:
            # Generic iterable of (key, value) pairs; later pairs win.
            for key, value in mapping:
                self.set(key, value)
    for key, value in iteritems(kwargs):
        if isinstance(value, (list, tuple)):
            self.setlist(key, value)
        else:
            self.set(key, value)
def to_wsgi_list(self):
    """Convert the headers into a list suitable for WSGI.

    The values are byte strings in Python 2 converted to latin1 and unicode
    strings in Python 3 for the WSGI server to encode.

    :return: list
    """
    if PY2:
        # PEP 3333 on py2 wants native (byte) strings.
        return [(to_native(k), v.encode("latin1")) for k, v in self]
    return list(self)
def copy(self):
    # Shallow copy: a new Headers with a new list of the same tuples.
    return self.__class__(self._list)
def __copy__(self):
    # Hook for copy.copy(); same semantics as copy().
    return self.copy()
def __str__(self):
    """Returns formatted headers suitable for HTTP transmission."""
    lines = ["%s: %s" % (key, value) for key, value in self.to_wsgi_list()]
    # Trailing sentinel yields the blank line that ends a header block.
    lines.append("\r\n")
    return "\r\n".join(lines)
def __repr__(self):
    # Shows the class name plus all (key, value) tuples for debugging.
    return "%s(%r)" % (self.__class__.__name__, list(self))
class ImmutableHeadersMixin(object):
    """Makes a :class:`Headers` immutable.  We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.

    .. versionadded:: 0.5

    :private:
    """

    # Every mutating method of Headers is overridden to raise via
    # is_immutable(); read-only methods are inherited unchanged.

    def __delitem__(self, key, **kwargs):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def set(self, key, value):
        is_immutable(self)

    def setlist(self, key, value):
        is_immutable(self)

    def add(self, item):
        is_immutable(self)

    def add_header(self, item):
        is_immutable(self)

    def remove(self, item):
        is_immutable(self)

    def extend(self, *args, **kwargs):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def setdefault(self, key, default):
        is_immutable(self)

    def setlistdefault(self, key, default):
        is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        # No copy is made: lookups read through to the live environ dict.
        self.environ = environ

    def __eq__(self, other):
        # Identity of the wrapped environ, not equality of its contents.
        return self.environ is other.environ

    __hash__ = None

    def __getitem__(self, key, _get_mode=False):
        # _get_mode is a no-op for this class as there is no index but
        # used because get() calls it.
        if not isinstance(key, string_types):
            raise KeyError(key)
        # WSGI stores "X-Foo" as "HTTP_X_FOO"; Content-Type/-Length
        # are special-cased without the HTTP_ prefix per PEP 3333.
        key = key.upper().replace("-", "_")
        if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
            return _unicodify_header_value(self.environ[key])
        return _unicodify_header_value(self.environ["HTTP_" + key])

    def __len__(self):
        # the iter is necessary because otherwise list calls our
        # len which would call list again and so forth.
        return len(list(iter(self)))

    def __iter__(self):
        for key, value in iteritems(self.environ):
            if key.startswith("HTTP_") and key not in (
                "HTTP_CONTENT_TYPE",
                "HTTP_CONTENT_LENGTH",
            ):
                # Reconstruct the dashed, Title-Cased header name.
                yield (
                    key[5:].replace("_", "-").title(),
                    _unicodify_header_value(value),
                )
            elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
                yield (key.replace("_", "-").title(), _unicodify_header_value(value))

    def copy(self):
        # Copying a live view makes no sense; fail loudly instead.
        raise TypeError("cannot create %r copies" % self.__class__.__name__)
@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
    instances as sequence and it will combine the return values of all wrapped
    dicts:

    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'

    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """

    def __reduce_ex__(self, protocol):
        # Pickle support: rebuild from the wrapped dicts.
        return type(self), (self.dicts,)

    def __init__(self, dicts=None):
        # The wrapped dicts are stored by reference; the combined view
        # reflects later changes to them.
        self.dicts = dicts or []

    @classmethod
    def fromkeys(cls):
        raise TypeError("cannot create %r instances by fromkeys" % cls.__name__)

    def __getitem__(self, key):
        # First wrapped dict containing the key wins.
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)

    def get(self, key, default=None, type=None):
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        # Failed conversion: keep looking in later dicts.
                        continue
                return d[key]
        return default

    def getlist(self, key, type=None):
        # Concatenates the per-dict value lists in dict order.
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv

    def _keys_impl(self):
        """This function exists so __len__ can be implemented more efficiently,
        saving one list creation from an iterator.

        Using this for Python 2's ``dict.keys`` behavior would be useless since
        `dict.keys` in Python 2 returns a list, while we have a set here.
        """
        rv = set()
        for d in self.dicts:
            rv.update(iterkeys(d))
        return rv

    def keys(self):
        return iter(self._keys_impl())

    __iter__ = keys

    def items(self, multi=False):
        # With multi=False, only the first occurrence of each key is
        # yielded; with multi=True, every (key, value) pair from every
        # wrapped dict appears.
        found = set()
        for d in self.dicts:
            for key, value in iteritems(d, multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value

    def values(self):
        for _key, value in iteritems(self):
            yield value

    def lists(self):
        # Merge the full value lists of all wrapped dicts per key.
        rv = {}
        for d in self.dicts:
            for key, values in iterlists(d):
                rv.setdefault(key, []).extend(values)
        return iteritems(rv)

    def listvalues(self):
        return (x[1] for x in self.lists())

    def copy(self):
        """Return a shallow mutable copy of this object.

        This returns a :class:`MultiDict` representing the data at the
        time of copying. The copy will no longer reflect changes to the
        wrapped dicts.

        .. versionchanged:: 0.15
            Return a mutable :class:`MultiDict`.
        """
        return MultiDict(self)

    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.

        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        rv = {}
        # reversed() so that earlier dicts overwrite later ones,
        # matching the lookup precedence of __getitem__.
        for d in reversed(self.dicts):
            rv.update(d.to_dict(flat))
        return rv

    def __len__(self):
        return len(self._keys_impl())

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    has_key = __contains__

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it.  This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Adds a new file to the dict.  `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if isinstance(file, FileStorage):
            value = file
        else:
            if isinstance(file, string_types):
                if filename is None:
                    filename = file
                # NOTE(review): the handle is opened here and ownership
                # passes to the FileStorage; the caller/consumer is
                # responsible for closing it.
                file = open(file, "rb")
            if filename and content_type is None:
                # Guess a content type from the filename extension.
                content_type = (
                    mimetypes.guess_type(filename)[0] or "application/octet-stream"
                )
            value = FileStorage(file, filename, name, content_type)
        self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        # copy.copy() of an immutable object can safely return itself.
        return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return MultiDict(self)

    def __copy__(self):
        # copy.copy() of an immutable object can safely return itself.
        return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.

    .. versionadded:: 0.6
    """

    def _iter_hashitems(self):
        # Enumerate so the hash reflects item ordering, unlike the
        # unordered multi dict variants.
        return enumerate(iteritems(self, multi=True))

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return OrderedMultiDict(self)

    def __copy__(self):
        return self
@native_itermethods(["values"])
class Accept(ImmutableList):
    """An :class:`Accept` object is just a list subclass for lists of
    ``(value, quality)`` tuples.  It is automatically sorted by specificity
    and quality.

    All :class:`Accept` objects work similar to a list but provide extra
    functionality for working with the data.  Containment checks are
    normalized to the rules of that header:

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    To get the quality for an item you can use normal item lookup:

    >>> print a['utf-8']
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
       :class:`Accept` objects are forced immutable now.

    .. versionchanged:: 1.0.0
       :class:`Accept` internal values are no longer ordered
       alphabetically for equal quality tags. Instead the initial
       order is preserved.
    """

    def __init__(self, values=()):
        if values is None:
            # None means "header not provided at all".
            list.__init__(self)
            self.provided = False
        elif isinstance(values, Accept):
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            # Most specific, highest quality first; sorted() is stable
            # so the initial order breaks ties.
            values = sorted(
                values, key=lambda x: (self._specificity(x[0]), x[1]), reverse=True,
            )
            list.__init__(self, values)

    def _specificity(self, value):
        """Returns a tuple describing the value's specificity."""
        return (value != "*",)

    def _value_matches(self, value, item):
        """Check if a value matches a given accept item."""
        return item == "*" or item.lower() == value.lower()

    def __getitem__(self, key):
        """Besides index lookup (getting item n) you can also pass it a string
        to get the quality for the item.  If the item is not in the list, the
        returned quality is ``0``.
        """
        if isinstance(key, string_types):
            return self.quality(key)
        return list.__getitem__(self, key)

    def quality(self, key):
        """Returns the quality of the key.

        .. versionadded:: 0.6
           In previous versions you had to use the item-lookup syntax
           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
        """
        for item, quality in self:
            if self._value_matches(key, item):
                return quality
        return 0

    def __contains__(self, value):
        for item, _quality in self:
            if self._value_matches(value, item):
                return True
        return False

    def __repr__(self):
        return "%s([%s])" % (
            self.__class__.__name__,
            ", ".join("(%r, %s)" % (x, y) for x, y in self),
        )

    def index(self, key):
        """Get the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
           This used to raise :exc:`IndexError`, which was inconsistent
           with the list API.
        """
        if isinstance(key, string_types):
            for idx, (item, _quality) in enumerate(self):
                if self._value_matches(key, item):
                    return idx
            raise ValueError(key)
        return list.index(self, key)

    def find(self, key):
        """Get the position of an entry or return -1.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all values."""
        for item in self:
            yield item[0]

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        result = []
        for value, quality in self:
            # q=1 is the default and can be omitted on the wire.
            if quality != 1:
                value = "%s;q=%s" % (value, quality)
            result.append(value)
        return ",".join(result)

    def __str__(self):
        return self.to_header()

    def _best_single_match(self, match):
        for client_item, quality in self:
            if self._value_matches(match, client_item):
                # self is sorted by specificity descending, we can exit
                return client_item, quality

    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the specificity and quality of the client.  If two items have the
        same quality and specificity, the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        result = default
        best_quality = -1
        best_specificity = (-1,)
        for server_item in matches:
            match = self._best_single_match(server_item)
            if not match:
                continue
            client_item, quality = match
            specificity = self._specificity(client_item)
            if quality <= 0 or quality < best_quality:
                continue
            # better quality or same quality but more specific => better match
            if quality > best_quality or specificity > best_specificity:
                result = server_item
                best_quality = quality
                best_specificity = specificity
        return result

    @property
    def best(self):
        """The best match as value."""
        if self:
            # First entry is most specific/highest quality after sorting.
            return self[0][0]
# Splits a mimetype on the type/subtype slash and on ";"-separated
# parameters (surrounding whitespace absorbed).
_mime_split_re = re.compile(r"/|(?:\s*;\s*)")


def _normalize_mime(value):
    # Lowercase, then split into [type, subtype, param, ...] parts.
    return _mime_split_re.split(value.lower())
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _specificity(self, value):
        # One boolean per segment: more non-wildcard segments (including
        # parameters) means a more specific media range.
        return tuple(x != "*" for x in _mime_split_re.split(value))

    def _value_matches(self, value, item):
        # item comes from the client, can't match if it's invalid.
        if "/" not in item:
            return False

        # value comes from the application, tell the developer when it
        # doesn't look valid.
        if "/" not in value:
            raise ValueError("invalid mimetype %r" % value)

        # Split the match value into type, subtype, and a sorted list of parameters.
        normalized_value = _normalize_mime(value)
        value_type, value_subtype = normalized_value[:2]
        value_params = sorted(normalized_value[2:])

        # "*/*" is the only valid value that can start with "*".
        if value_type == "*" and value_subtype != "*":
            raise ValueError("invalid mimetype %r" % value)

        # Split the accept item into type, subtype, and parameters.
        normalized_item = _normalize_mime(item)
        item_type, item_subtype = normalized_item[:2]
        item_params = sorted(normalized_item[2:])

        # "*/not-*" from the client is invalid, can't match.
        if item_type == "*" and item_subtype != "*":
            return False

        # Full wildcard on either side matches anything; otherwise the
        # types must agree and the subtypes must agree (or be wildcard),
        # with parameters compared only on exact subtype matches.
        return (
            (item_type == "*" and item_subtype == "*")
            or (value_type == "*" and value_subtype == "*")
        ) or (
            item_type == value_type
            and (
                item_subtype == "*"
                or value_subtype == "*"
                or (item_subtype == value_subtype and item_params == value_params)
            )
        )

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return "application/xhtml+xml" in self or "application/xml" in self

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return "application/json" in self
# Language tags may use "-" or "_" as the subtag separator.
_locale_delim_re = re.compile(r"[_-]")


def _normalize_lang(value):
    """Process a language tag for matching."""
    return _locale_delim_re.split(value.lower())
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for language tags."""

    def _value_matches(self, value, item):
        # Case- and separator-insensitive tag comparison.
        return item == "*" or _normalize_lang(value) == _normalize_lang(item)

    def best_match(self, matches, default=None):
        """Given a list of supported values, finds the best match from
        the list of accepted values.

        Language tags are normalized for the purpose of matching, but
        are returned unchanged.

        If no exact match is found, this will fall back to matching
        the first subtag (primary language only), first with the
        accepted values then with the match values. This partial is not
        applied to any other language subtags.

        The default is returned if no exact or fallback match is found.

        :param matches: A list of supported languages to find a match.
        :param default: The value that is returned if none match.
        """
        # Look for an exact match first. If a client accepts "en-US",
        # "en-US" is a valid match at this point.
        result = super(LanguageAccept, self).best_match(matches)

        if result is not None:
            return result

        # Fall back to accepting primary tags. If a client accepts
        # "en-US", "en" is a valid match at this point. Need to use
        # re.split to account for 2 or 3 letter codes.
        fallback = Accept(
            [(_locale_delim_re.split(item[0], 1)[0], item[1]) for item in self]
        )
        result = fallback.best_match(matches)

        if result is not None:
            return result

        # Fall back to matching primary tags. If the client accepts
        # "en", "en-US" is a valid match at this point.
        fallback_matches = [_locale_delim_re.split(item, 1)[0] for item in matches]
        result = super(LanguageAccept, self).best_match(fallback_matches)

        # Return a value from the original match list. Find the first
        # original value that starts with the matched primary tag.
        if result is not None:
            return next(item for item in matches if item.startswith(result))

        return default
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _normalize(name):
            # Resolve aliases via the codecs registry ("utf8" -> "utf-8");
            # unknown names fall back to simple lowercasing.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        return item == "*" or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
    """Return a new property object for a cache header.  Useful if you
    want to add support for a cache extension in a subclass.

    The property delegates to the ``_get_cache_value`` /
    ``_set_cache_value`` / ``_del_cache_value`` protocol of the
    owning cache-control dict.
    """

    def fget(self):
        return self._get_cache_value(key, empty, type)

    def fset(self, value):
        self._set_cache_value(key, value, type)

    def fdel(self):
        self._del_cache_value(key)

    return property(fget, fset, fdel, "accessor for %r" % key)
class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header.  It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method.  If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 0.4

       Setting `no_cache` or `private` to boolean `True` will set the implicit
       none-value which is ``*``:

       >>> cc = ResponseCacheControl()
       >>> cc.no_cache = True
       >>> cc
       <ResponseCacheControl 'no-cache'>
       >>> cc.no_cache
       '*'
       >>> cc.no_cache = None
       >>> cc
       <ResponseCacheControl ''>

       In versions before 0.5 the behavior documented here affected the now
       no longer existing `CacheControl` class.
    """

    no_cache = cache_property("no-cache", "*", None)
    no_store = cache_property("no-store", None, bool)
    max_age = cache_property("max-age", -1, int)
    no_transform = cache_property("no-transform", None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        # on_update is the UpdateDictMixin hook fired on mutation.
        self.on_update = on_update
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        if type is bool:
            # Boolean directives are valueless: presence means True.
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                # Directive present without a value -> the "empty" marker.
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    pass
            return value

    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                # Stored with value None -> rendered as a bare directive.
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                self.pop(key, None)
            elif value is True:
                self[key] = None
            else:
                self[key] = value

    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a cache control header."""
        return dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())),
        )
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests.  This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # Request-only directives (RFC 2616 section 14.9).
    max_stale = cache_property("max-stale", "*", int)
    min_fresh = cache_property("min-fresh", "*", int)
    only_if_cached = cache_property("only-if-cached", None, bool)
class ResponseCacheControl(_CacheControl):
    """A cache control for responses.  Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # Response-only directives (RFC 2616 section 14.9).
    public = cache_property("public", None, bool)
    private = cache_property("private", "*", None)
    must_revalidate = cache_property("must-revalidate", None, bool)
    proxy_revalidate = cache_property("proxy-revalidate", None, bool)
    s_maxage = cache_property("s-maxage", None, None)
    immutable = cache_property("immutable", None, bool)
# Attach cache_property to _CacheControl as a staticmethod so that
# subclasses can reuse it to declare their own extension directives.
_CacheControl.cache_property = staticmethod(cache_property)
def csp_property(key):
    """Return a new property object for a content security policy header.
    Useful if you want to add support for a csp extension in a
    subclass.

    The property delegates to the ``_get_value`` / ``_set_value`` /
    ``_del_value`` protocol of the owning policy dict.
    """

    def fget(self):
        return self._get_value(key)

    def fset(self, value):
        self._set_value(key, value)

    def fdel(self):
        self._del_value(key)

    return property(fget, fset, fdel, "accessor for %r" % key)
class ContentSecurityPolicy(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Content Security Policy
    header. It has accessors for all the level 3 policies.

    Because the csp directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`ContentSecurityPolicy` object again
    you can convert the object into a string or call the
    :meth:`to_header` method. If you plan to subclass it and add your
    own items have a look at the sourcecode for that class.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.
    """

    # One accessor per CSP directive; the attribute name replaces the
    # dashes of the header directive with underscores.
    base_uri = csp_property("base-uri")
    child_src = csp_property("child-src")
    connect_src = csp_property("connect-src")
    default_src = csp_property("default-src")
    font_src = csp_property("font-src")
    form_action = csp_property("form-action")
    frame_ancestors = csp_property("frame-ancestors")
    frame_src = csp_property("frame-src")
    img_src = csp_property("img-src")
    manifest_src = csp_property("manifest-src")
    media_src = csp_property("media-src")
    navigate_to = csp_property("navigate-to")
    object_src = csp_property("object-src")
    prefetch_src = csp_property("prefetch-src")
    plugin_types = csp_property("plugin-types")
    report_to = csp_property("report-to")
    report_uri = csp_property("report-uri")
    sandbox = csp_property("sandbox")
    script_src = csp_property("script-src")
    script_src_attr = csp_property("script-src-attr")
    script_src_elem = csp_property("script-src-elem")
    style_src = csp_property("style-src")
    style_src_attr = csp_property("style-src-attr")
    style_src_elem = csp_property("style-src-elem")
    worker_src = csp_property("worker-src")

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # NOTE(review): with the default ``values=()`` this evaluates to
        # True; it is only False when ``None`` is passed explicitly --
        # confirm that is the intended meaning of "provided".
        self.provided = values is not None

    def _get_value(self, key):
        """Used internally by the accessor properties."""
        return self.get(key)

    def _set_value(self, key, value):
        """Used internally by the accessor properties.  Assigning ``None``
        removes the directive entirely."""
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value

    def _del_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a Content Security Policy header."""
        return dump_csp_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())),
        )
class CallbackDict(UpdateDictMixin, dict):
    """A ``dict`` subclass that invokes a callback whenever the mapping is
    modified.  The callback receives the dict instance itself as its only
    argument.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        self.on_update = on_update

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, dict.__repr__(self))
class HeaderSet(collections_abc.MutableSet):
    """Similar to the :class:`ETags` class this implements a set-like structure.
    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
    content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        # ``_headers`` keeps the original casing and insertion order for
        # serialization; ``_set`` keeps the lowercased values for O(1)
        # case-insensitive membership tests.  The two must stay in sync.
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, item in enumerate(self._headers):
            # Bug fix: compare lowercased values on *both* sides.  The
            # previous code compared against the caller's original-cased
            # ``header`` argument, so e.g. ``remove("FOO")`` dropped "foo"
            # from ``_set`` but left the stale "Foo" entry in ``_headers``,
            # desynchronizing membership tests from iteration order.
            if item.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        # Notify at most once per call, and only if something changed.
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            return self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type. When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ", ".join(map(quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._headers)

    def __nonzero__(self):
        # Python 2 truth hook; Python 3 falls back to ``__len__``.
        return bool(self._set)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._headers)
class ETags(collections_abc.Container, collections_abc.Iterable):
    """A collection of entity tags supporting membership checks against
    both strong and weak etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # A star tag ("If-None-Match: *") matches everything, so no strong
        # tags are stored in that case.
        if star_tag:
            self._strong = frozenset()
        else:
            self._strong = frozenset(strong_etags or ())
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        result = set(self._strong)
        if include_weak:
            result |= self._weak
        return result

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def is_strong(self, etag):
        """Check if an etag is strong."""
        return etag in self._strong

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        return True if self.star_tag else self.is_strong(etag)

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        return self.contains_weak(etag) if weak else self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return "*"
        parts = ['"%s"' % tag for tag in self._strong]
        parts.extend('W/"%s"' % tag for tag in self._weak)
        return ", ".join(parts)

    def __call__(self, etag=None, data=None, include_weak=False):
        # Exactly one of ``etag`` / ``data`` must be provided.
        if [etag, data].count(None) != 1:
            raise TypeError("either tag or data required, but at least one")
        if etag is None:
            etag = generate_etag(data)
        if include_weak and etag in self._weak:
            return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    __nonzero__ = __bool__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return "<{} {!r}>".format(type(self).__name__, str(self))
class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It holds either an etag or a date, or neither -- never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted. Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Serialize back into an ``If-Range`` header value.  The date takes
        precedence over the etag; an empty object yields an empty string.
        """
        if self.date is not None:
            return http_date(self.date)
        return quote_etag(self.etag) if self.etag is not None else ""

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<{} {!r}>".format(type(self).__name__, str(self))
class Range(object):
    """Represents a ``Range`` header.  The helper methods support only
    ``bytes`` as the unit.  A list of ranges may be stored, but the
    conversion helpers only operate when exactly one range is present.

    :raise ValueError: If the ranges provided are invalid.

    .. versionchanged:: 0.15
        The ranges passed in are validated.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range. Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges
        for begin, stop in ranges:
            # A range is valid if it has a start and, when an end is given,
            # the start is non-negative and strictly before the end.  A
            # missing end with a negative start is a suffix range.
            valid = begin is not None and (stop is None or 0 <= begin < stop)
            if not valid:
                raise ValueError("{} is not a valid range.".format((begin, stop)))

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        if length is None or self.units != "bytes" or len(self.ranges) != 1:
            return None
        begin, stop = self.ranges[0]
        stop = length if stop is None else stop
        if begin < 0:
            # Suffix range: count from the end of the representation.
            begin += length
        if not is_byte_range_valid(begin, stop, length):
            return None
        return begin, min(stop, length)

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        rng = self.range_for_length(length)
        return None if rng is None else ContentRange(self.units, rng[0], rng[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        parts = []
        for begin, stop in self.ranges:
            if stop is None:
                parts.append(str(begin) if begin < 0 else "%s-" % begin)
            else:
                # Stored ranges are exclusive; the header form is inclusive.
                parts.append("%s-%s" % (begin, stop - 1))
        return "%s=%s" % (self.units, ",".join(parts))

    def to_content_range_header(self, length):
        """Converts the object into `Content-Range` HTTP header,
        based on given length
        """
        rng = self.range_for_length(length)
        if rng is None:
            return None
        return "%s %d-%d/%d" % (self.units, rng[0], rng[1] - 1, length)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<{} {!r}>".format(type(self).__name__, str(self))
class ContentRange(object):
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        # NOTE(review): validation uses ``assert``, so it disappears when
        # running under ``python -O``.
        assert is_byte_range_valid(start, stop, length), "Bad range provided"
        self.on_update = on_update
        self.set(start, stop, length, units)

    def _callback_property(name):  # noqa: B902
        # Class-body helper (not a method): builds a property that reads the
        # backing ``_<name>`` attribute and fires ``on_update`` whenever it
        # is assigned.  It is removed from the class namespace below.
        def fget(self):
            return getattr(self, name)

        def fset(self, value):
            setattr(self, name, value)
            if self.on_update is not None:
                self.on_update(self)

        return property(fget, fset)

    #: The units to use, usually "bytes"
    units = _callback_property("_units")
    #: The start point of the range or `None`.
    start = _callback_property("_start")
    #: The stop point of the range (non-inclusive) or `None`. Can only be
    #: `None` if also start is `None`.
    stop = _callback_property("_stop")
    #: The length of the range or `None`.
    length = _callback_property("_length")
    # Remove the helper so it does not leak as a public attribute.
    del _callback_property

    def set(self, start, stop, length=None, units="bytes"):
        """Simple method to update the ranges."""
        assert is_byte_range_valid(start, stop, length), "Bad range provided"
        # Write the backing attributes directly (bypassing the properties)
        # so ``on_update`` fires only once, at the end.
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        # An unset object serializes to an empty string; an unknown length
        # is rendered as "*" per the Content-Range grammar.
        if self.units is None:
            return ""
        if self.length is None:
            length = "*"
        else:
            length = self.length
        if self.start is None:
            return "%s */%s" % (self.units, length)
        return "%s %s-%s/%s" % (self.units, self.start, self.stop - 1, length)

    def __nonzero__(self):
        return self.units is not None

    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
    """Represents an `Authorization` header sent by the client. You should
    not create this kind of object yourself but use it when it's returned by
    the `parse_authorization_header` function.

    This object is a dict subclass and can be altered by setting dict items
    but it should be considered immutable as it's returned by the client and
    not meant for modifications.

    .. versionchanged:: 0.5
        This object became immutable.
    """

    def __init__(self, auth_type, data=None):
        dict.__init__(self, data or {})
        # The authentication scheme as given by the caller/parser,
        # e.g. "basic" or "digest".
        self.type = auth_type

    # All accessors below are read-only views over the underlying dict;
    # keys that were not transmitted simply yield ``None``.
    @property
    def username(self):
        """The username transmitted. This is set for both basic and digest
        auth all the time.
        """
        return self.get("username")

    @property
    def password(self):
        """When the authentication type is basic this is the password
        transmitted by the client, else `None`.
        """
        return self.get("password")

    @property
    def realm(self):
        """This is the server realm sent back for HTTP digest auth."""
        return self.get("realm")

    @property
    def nonce(self):
        """The nonce the server sent for digest auth, sent back by the client.
        A nonce should be unique for every 401 response for HTTP digest auth.
        """
        return self.get("nonce")

    @property
    def uri(self):
        """The URI from Request-URI of the Request-Line; duplicated because
        proxies are allowed to change the Request-Line in transit. HTTP
        digest auth only.
        """
        return self.get("uri")

    @property
    def nc(self):
        """The nonce count value transmitted by clients if a qop-header is
        also transmitted. HTTP digest auth only.
        """
        return self.get("nc")

    @property
    def cnonce(self):
        """If the server sent a qop-header in the ``WWW-Authenticate``
        header, the client has to provide this value for HTTP digest auth.
        See the RFC for more details.
        """
        return self.get("cnonce")

    @property
    def response(self):
        """A string of 32 hex digits computed as defined in RFC 2617, which
        proves that the user knows a password. Digest auth only.
        """
        return self.get("response")

    @property
    def opaque(self):
        """The opaque header from the server returned unchanged by the client.
        It is recommended that this string be base64 or hexadecimal data.
        Digest auth only.
        """
        return self.get("opaque")

    @property
    def qop(self):
        """Indicates what "quality of protection" the client has applied to
        the message for HTTP digest auth. Note that this is a single token,
        not a quoted list of alternatives as in WWW-Authenticate.
        """
        return self.get("qop")
class WWWAuthenticate(UpdateDictMixin, dict):
    """Provides simple access to `WWW-Authenticate` headers."""

    #: list of keys that require quoting in the generated header
    _require_quoting = frozenset(["domain", "nonce", "opaque", "realm", "qop"])

    def __init__(self, auth_type=None, values=None, on_update=None):
        dict.__init__(self, values or ())
        if auth_type:
            # The scheme is stored in the dict itself under a sentinel key
            # so it round-trips through dict operations; ``to_header``
            # pops it back out.
            self["__auth_type__"] = auth_type
        self.on_update = on_update

    def set_basic(self, realm="authentication required"):
        """Clear the auth info and enable basic auth."""
        # ``dict.clear``/``dict.update`` are called directly to bypass the
        # UpdateDictMixin hooks; ``on_update`` is then fired exactly once.
        dict.clear(self)
        dict.update(self, {"__auth_type__": "basic", "realm": realm})
        if self.on_update:
            self.on_update(self)

    def set_digest(
        self, realm, nonce, qop=("auth",), opaque=None, algorithm=None, stale=False
    ):
        """Clear the auth info and enable digest auth."""
        d = {
            "__auth_type__": "digest",
            "realm": realm,
            "nonce": nonce,
            "qop": dump_header(qop),
        }
        if stale:
            d["stale"] = "TRUE"
        if opaque is not None:
            d["opaque"] = opaque
        if algorithm is not None:
            d["algorithm"] = algorithm
        dict.clear(self)
        dict.update(self, d)
        if self.on_update:
            self.on_update(self)

    def to_header(self):
        """Convert the stored values into a WWW-Authenticate header."""
        d = dict(self)
        auth_type = d.pop("__auth_type__", None) or "basic"
        return "%s %s" % (
            auth_type.title(),
            ", ".join(
                [
                    "%s=%s"
                    % (
                        key,
                        quote_header_value(
                            value, allow_token=key not in self._require_quoting
                        ),
                    )
                    for key, value in iteritems(d)
                ]
            ),
        )

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.to_header())

    def auth_property(name, doc=None):  # noqa: B902
        """A static helper function for subclasses to add extra authentication
        system properties onto a class::

            class FooAuthenticate(WWWAuthenticate):
                special_realm = auth_property('special_realm')

        For more information have a look at the sourcecode to see how the
        regular properties (:attr:`realm` etc.) are implemented.
        """
        # Class-body helper (not a method); re-exported as a staticmethod
        # at the bottom of the class body.

        def _set_value(self, value):
            # Assigning ``None`` removes the key entirely.
            if value is None:
                self.pop(name, None)
            else:
                self[name] = str(value)

        return property(lambda x: x.get(name), _set_value, doc=doc)

    def _set_property(name, doc=None):  # noqa: B902
        # Class-body helper for list-valued directives: exposes the raw
        # header value as a parsed ``HeaderSet`` that writes itself back
        # into this dict when mutated.  Deleted from the class namespace
        # below.
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self:
                    del self[name]
                elif header_set:
                    self[name] = header_set.to_header()

            return parse_set_header(self.get(name), on_update)

        return property(fget, doc=doc)

    type = auth_property(
        "__auth_type__",
        doc="""The type of the auth mechanism. HTTP currently specifies
        ``Basic`` and ``Digest``.""",
    )
    realm = auth_property(
        "realm",
        doc="""A string to be displayed to users so they know which
        username and password to use. This string should contain at
        least the name of the host performing the authentication and
        might additionally indicate the collection of users who might
        have access.""",
    )
    domain = _set_property(
        "domain",
        doc="""A list of URIs that define the protection space. If a URI
        is an absolute path, it is relative to the canonical root URL of
        the server being accessed.""",
    )
    nonce = auth_property(
        "nonce",
        doc="""
        A server-specified data string which should be uniquely generated
        each time a 401 response is made. It is recommended that this
        string be base64 or hexadecimal data.""",
    )
    opaque = auth_property(
        "opaque",
        doc="""A string of data, specified by the server, which should
        be returned by the client unchanged in the Authorization header
        of subsequent requests with URIs in the same protection space.
        It is recommended that this string be base64 or hexadecimal
        data.""",
    )
    algorithm = auth_property(
        "algorithm",
        doc="""A string indicating a pair of algorithms used to produce
        the digest and a checksum. If this is not present it is assumed
        to be "MD5". If the algorithm is not understood, the challenge
        should be ignored (and a different one used, if there is more
        than one).""",
    )
    qop = _set_property(
        "qop",
        doc="""A set of quality-of-privacy directives such as auth and
        auth-int.""",
    )

    @property
    def stale(self):
        """A flag, indicating that the previous request from the client
        was rejected because the nonce value was stale.
        """
        val = self.get("stale")
        if val is not None:
            return val.lower() == "true"

    @stale.setter
    def stale(self, value):
        if value is None:
            self.pop("stale", None)
        else:
            self["stale"] = "TRUE" if value else "FALSE"

    # Re-expose ``auth_property`` for subclasses; drop the internal
    # ``_set_property`` helper from the class namespace.
    auth_property = staticmethod(auth_property)
    del _set_property
class FileStorage(object):
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files. All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(
        self,
        stream=None,
        filename=None,
        name=None,
        content_type=None,
        content_length=None,
        headers=None,
    ):
        # ``name`` is the form field name, not the file name.
        self.name = name
        self.stream = stream or BytesIO()

        # if no filename is provided we can attempt to get the filename
        # from the stream object passed. There we have to be careful to
        # skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angular brackets.
        if filename is None:
            filename = getattr(stream, "name", None)
            s = make_literal_wrapper(filename)
            if filename and filename[0] == s("<") and filename[-1] == s(">"):
                filename = None

            # On Python 3 we want to make sure the filename is always unicode.
            # This might not be if the name attribute is bytes due to the
            # file being opened from the bytes API.
            if not PY2 and isinstance(filename, bytes):
                filename = filename.decode(get_filesystem_encoding(), "replace")

        self.filename = filename
        if headers is None:
            headers = Headers()
        self.headers = headers
        # Explicit content type / length override whatever the headers
        # object already carries.
        if content_type is not None:
            headers["Content-Type"] = content_type
        if content_length is not None:
            headers["Content-Length"] = str(content_length)

    def _parse_content_type(self):
        # Parse lazily and cache the result; ``mimetype`` and
        # ``mimetype_params`` both read from the cached tuple.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header. Usually not available"""
        return self.headers.get("content-type")

    @property
    def content_length(self):
        """The content-length sent in the header. Usually not available"""
        return int(self.headers.get("content-length") or 0)

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object. If the
        destination is a file object you have to close it yourself after the
        call. The buffer size is the number of bytes held in memory during
        the copy process. It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename, :class:`os.PathLike`, or open file
            object to write to.
        :param buffer_size: Passed as the ``length`` parameter of
            :func:`shutil.copyfileobj`.

        .. versionchanged:: 1.0
            Supports :mod:`pathlib`.
        """
        from shutil import copyfileobj

        close_dst = False
        # Accept os.PathLike objects by resolving them to a path string.
        if hasattr(dst, "__fspath__"):
            dst = fspath(dst)
        if isinstance(dst, string_types):
            # We opened the file ourselves, so we are responsible for
            # closing it; caller-provided file objects stay open.
            dst = open(dst, "wb")
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        try:
            self.stream.close()
        except Exception:
            pass

    def __nonzero__(self):
        # Truthiness reflects whether a filename was transmitted.
        return bool(self.filename)

    __bool__ = __nonzero__

    def __getattr__(self, name):
        try:
            return getattr(self.stream, name)
        except AttributeError:
            # SpooledTemporaryFile doesn't implement IOBase, get the
            # attribute from its backing file instead.
            # https://github.com/python/cpython/pull/3249
            if hasattr(self.stream, "_file"):
                return getattr(self.stream._file, name)
            raise

    def __iter__(self):
        return iter(self.stream)

    def __repr__(self):
        return "<%s: %r (%r)>" % (
            self.__class__.__name__,
            self.filename,
            self.content_type,
        )
# circular dependencies
from .http import dump_csp_header
from .http import dump_header
from .http import dump_options_header
from .http import generate_etag
from .http import http_date
from .http import is_byte_range_valid
from .http import parse_options_header
from .http import parse_set_header
from .http import quote_etag
from .http import quote_header_value
from .http import unquote_etag
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/urls.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
urlparse, whose main purpose were to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import re
from collections import namedtuple
from ._compat import fix_tuple_repr
from ._compat import implements_to_string
from ._compat import make_literal_wrapper
from ._compat import normalize_string_tuple
from ._compat import PY2
from ._compat import text_type
from ._compat import to_native
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._internal import _decode_idna
from ._internal import _encode_idna
# A regular expression for what a valid schema looks like
# NOTE(review): inside a character class ``+-.`` is the range U+002B..U+002E,
# which matches "+", ",", "-" and "." -- so a comma is accepted even though
# RFC 3986 schemes allow only ALPHA / DIGIT / "+" / "-" / ".".  Left as-is
# because tightening it would change matching behavior.
_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")

# Characters that are safe in any part of an URL.
_always_safe = frozenset(
    bytearray(
        b"abcdefghijklmnopqrstuvwxyz"
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        b"0123456789"
        b"-._~"
    )
)

_hexdigits = "0123456789ABCDEFabcdef"
# Maps every two-character hex byte string (e.g. ``b"2F"``) to its integer
# byte value; used for percent-decoding.
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
)
# Precomputed ``%XX`` escape (as ASCII bytes) for every possible byte value;
# used for percent-encoding.
_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]

# The shared (scheme, netloc, path, query, fragment) tuple that both URL
# classes build on.
_URLTuple = fix_tuple_repr(
    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
)
class BaseURL(_URLTuple):
    """Superclass of :py:class:`URL` and :py:class:`BytesURL`."""

    __slots__ = ()

    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)

    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`. The
        host is either the hostname or the IP address mentioned in the
        URL. It will not contain the port.
        """
        return self._split_host()[0]

    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII. If it finds a netloc that is not ASCII
        it will attempt to idna decode it. This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            try:
                rv = _encode_idna(rv)
            except UnicodeError:
                # Fall back to dropping non-ASCII characters when IDNA
                # encoding fails; better a lossy host than an exception.
                rv = rv.encode("ascii", "ignore")
        return to_native(rv, "ascii", "ignore")

    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise. This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            # Reject out-of-range port numbers instead of raising.
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Missing or non-numeric port yields None.
            pass

    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]

    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]

    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]

    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL. Ths is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args, **kwargs):
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or "")

        # IPv6 literals must be re-bracketed after decoding.
        if ":" in rv:
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return rv

    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI. This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.

        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode("ascii"))

    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds a IRI. This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.

        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))

    def get_file_location(self, pathformat=None):
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.

        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.

        If the URL does not point to a local file, the server and location
        are both represented as ``None``.

        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None

        path = url_unquote(self.path)
        host = self.netloc or None

        if pathformat is None:
            # Autodetect from the running interpreter's OS.
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"

        if pathformat == "windows":
            # Convert "/C|/path" or "/C:/path" into "C:/path".
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                path = path[1:2] + ":" + path[3:]
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath

            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath

            path = posixpath.normpath(path)
        else:
            raise TypeError("Invalid path format %s" % repr(pathformat))

        # Loopback hosts are treated as "local", i.e. no server component.
        if host in ("127.0.0.1", "::1", "localhost"):
            host = None

        return host, path

    def _split_netloc(self):
        # -> (auth_or_None, host_and_port).  ``_at`` is "@" (str or bytes
        # depending on the subclass).
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc

    def _split_auth(self):
        # -> (username_or_None, password_or_None), both still URL-encoded.
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)

    def _split_host(self):
        # -> (host_or_None, port_string_or_None).  Handles bracketed IPv6
        # literals like "[::1]:8080".
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None

        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None
@implements_to_string
class URL(BaseURL):
    """Represents a parsed URL. This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.
    """

    __slots__ = ()
    # Text variants of the separators used by the ``_split_*`` helpers on
    # :class:`BaseURL`; :class:`BytesURL` defines the byte variants.
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"

    def __str__(self):
        return self.to_url()

    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as a native
        string (bytes on Python 2, text on Python 3).
        """
        rv = self.ascii_host or ""
        # An IPv6 literal (contains ":") must be bracketed before a port
        # or auth information is attached.
        if ":" in rv:
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        # Re-quote the credentials; "/:%" stay safe so already-quoted
        # sequences survive the round trip.
        auth = ":".join(
            filter(
                None,
                [
                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return to_native(rv)

    def encode(self, charset="utf-8", errors="replace"):
        """Encodes the URL to a tuple made out of bytes. The charset is
        only being used for the path, query and fragment.
        """
        return BytesURL(
            self.scheme.encode("ascii"),
            self.encode_netloc(),
            self.path.encode(charset, errors),
            self.query.encode(charset, errors),
            self.fragment.encode(charset, errors),
        )
class BytesURL(BaseURL):
    """Represents a parsed URL in bytes."""

    __slots__ = ()
    # Byte variants of the separators used by the ``_split_*`` helpers.
    _at = b"@"
    _colon = b":"
    _lbracket = b"["
    _rbracket = b"]"

    def __str__(self):
        return self.to_url().decode("utf-8", "replace")

    def encode_netloc(self):
        """Returns the netloc unchanged as bytes."""
        return self.netloc

    def decode(self, charset="utf-8", errors="replace"):
        """Decodes the URL to a tuple made out of strings. The charset is
        only being used for the path, query and fragment.
        """
        scheme = self.scheme.decode("ascii")
        netloc = self.decode_netloc()
        path = self.path.decode(charset, errors)
        query = self.query.decode(charset, errors)
        fragment = self.fragment.decode(charset, errors)
        return URL(scheme, netloc, path, query, fragment)
# Cache of unquote tables keyed by the frozenset of "unsafe" byte values;
# the empty set maps directly to the full hex-pair table.
_unquote_maps = {frozenset(): _hextobyte}


def _unquote_to_bytes(string, unsafe=""):
    # Decode percent-escapes in ``string`` to raw bytes.  Bytes listed in
    # ``unsafe`` — and malformed escapes — are left percent-encoded.
    if isinstance(string, text_type):
        string = string.encode("utf-8")
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode("utf-8")
    unsafe = frozenset(bytearray(unsafe))
    # After splitting on "%", every group (except the first) starts with
    # what should be a two-digit hex escape.
    groups = iter(string.split(b"%"))
    result = bytearray(next(groups, b""))
    try:
        hex_to_byte = _unquote_maps[unsafe]
    except KeyError:
        # Build and memoize a table that omits the unsafe byte values so
        # they stay quoted in the output.
        hex_to_byte = _unquote_maps[unsafe] = {
            h: b for h, b in _hextobyte.items() if b not in unsafe
        }
    for group in groups:
        code = group[:2]
        if code in hex_to_byte:
            result.append(hex_to_byte[code])
            result.extend(group[2:])
        else:
            # Not a decodable escape: restore the "%" the split consumed.
            result.append(37)  # %
            result.extend(group)
    return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield quoted ``key=value`` strings for :func:`url_encode`."""
    from .datastructures import iter_multi_items

    items = iter_multi_items(obj)
    if sort:
        items = sorted(items, key=key)
    for k, v in items:
        # Pairs with a ``None`` value are dropped from the output entirely.
        if v is None:
            continue
        if not isinstance(k, bytes):
            k = text_type(k).encode(charset)
        if not isinstance(v, bytes):
            v = text_type(v).encode(charset)
        yield "=".join((_fast_url_quote_plus(k), _fast_url_quote_plus(v)))
def _url_unquote_legacy(value, unsafe=""):
    """Unquote *value* strictly as UTF-8, falling back to latin1 when the
    bytes are not valid UTF-8.
    """
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        pass
    return url_unquote(value, charset="latin1", unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    # ``s`` coerces literals to the input's type (text vs. bytes) so one
    # code path handles both.
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)

    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s("//"):
        # The netloc extends up to the first "/", "?" or "#".
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        # Unbalanced brackets mean a malformed IPv6 host.
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")

    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)

    # Return the result in the same flavor (text/bytes) as the input.
    result_type = URL if is_text_based else BytesURL
    return result_type(scheme, netloc, url, query, fragment)
def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """Precompile the translation table for a URL encoding function.

    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.

    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    # One table entry per possible byte value: either the literal
    # character or its "%XX" escape.
    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]

    if not PY2:

        def quote(string):
            # On Python 3 iterating bytes yields ints, directly usable as
            # table indexes.
            return "".join([table[c] for c in string])

    else:

        def quote(string):
            # On Python 2 bytes iterate as 1-char strings, so go through
            # ``bytearray`` to obtain ints.
            return "".join([table[c] for c in bytearray(string)])

    return quote
# Quoters precompiled once at import time for the hot code paths.
_fast_url_quote = _make_fast_url_quote()
# Space is kept "safe" here so the "+" substitution below can find it.
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")


def _fast_url_quote_plus(string):
    # Quote for use in a query string: spaces become "+" instead of "%20".
    return _fast_quote_plus(string).replace(" ", "+")
def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Normalize everything to bytes first so quoting works byte-wise.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # Unsafe wins over safe; the always-safe set is added unconditionally.
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    out = bytearray()
    for byte in bytearray(string):
        if byte not in safe:
            out.extend(_bytetohex[byte])
        else:
            out.append(byte)
    return to_native(bytes(out))
def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    # Keep spaces unquoted (and "+" quoted) so the substitution below is
    # unambiguous.
    quoted = url_quote(string, charset, errors, safe + " ", "+")
    return quoted.replace(" ", "+")
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    # All five parts are coerced to one string flavor (text or bytes).
    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
    s = make_literal_wrapper(scheme)
    url = s("")

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url
def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    raw = _unquote_to_bytes(string, unsafe)
    if charset is None:
        return raw
    return raw.decode(charset, errors)
def url_unquote_plus(s, charset="utf-8", errors="replace"):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default decoding errors are replaced. Set `errors` to ``'ignore'``
    or ``'strict'`` for a different behavior; in strict mode a decoding
    error is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Translate "+" to a space in the same string flavor as the input.
    plus, space = (u"+", u" ") if isinstance(s, text_type) else (b"+", b" ")
    return url_unquote(s.replace(plus, space), charset, errors)
def url_fix(s, charset="utf-8"):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on. This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    # First step is to switch to unicode processing and to convert
    # backslashes (which are invalid in URLs anyways) to slashes. This is
    # consistent with what Chrome does.
    s = to_unicode(s, charset, "replace").replace("\\", "/")

    # For the specific case that we look like a malformed windows URL
    # we want to fix this up manually:
    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
        s = "file:///" + s[7:]

    url = url_parse(s)
    # Quote each component separately; "%" stays safe so escapes already
    # present in the input survive the round trip.
    path = url_quote(url.path, charset, safe="/%+$!*'(),")
    qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
    anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
    return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor)))
# not-unreserved characters remain quoted when unquoting to IRI
_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])


def _codec_error_url_quote(e):
    """Used in :func:`uri_to_iri` after unquoting to re-quote any
    invalid bytes.
    """
    # Quote only the offending slice and resume decoding right after it.
    out = _fast_url_quote(e.object[e.start : e.end])
    if PY2:
        out = out.decode("utf-8")
    return out, e.end


# Register the handler so ``bytes.decode`` can reference it by name.
codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.
    :param charset: The encoding to encode unquoted bytes with.
    :param errors: Error handler to use during ``bytes.encode``. By
        default, invalid bytes are left quoted.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    parts = url_parse(to_unicode(uri, charset))

    def unquote_part(value):
        # Reserved/invalid characters stay quoted via ``_to_iri_unsafe``.
        return url_unquote(value, charset, errors, _to_iri_unsafe)

    return url_unparse(
        (
            parts.scheme,
            parts.decode_netloc(),
            unquote_part(parts.path),
            unquote_part(parts.query),
            unquote_part(parts.fragment),
        )
    )
# reserved characters remain unquoted when quoting to URI
_to_uri_safe = ":/?#[]@!$&'()*+,;=%"


def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.
    :param charset: The encoding of the IRI.
    :param errors: Error handler to use during ``bytes.encode``.
    :param safe_conversion: Return the URL unchanged if it only contains
        ASCII characters and no whitespace. See the explanation below.

    There is a general problem with IRI conversion with some protocols
    that are in violation of the URI specification. Consider the
    following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    After parsing, we don't know if the scheme requires the ``//``,
    which is dropped if empty, but conveys different meanings in the
    final URL if it's present or not. In this case, you can use
    ``safe_conversion``, which will return the URL unchanged if it only
    contains ASCII characters and no whitespace. This can result in a
    URI with unquoted characters if it was not already quoted correctly,
    but preserves the URL's semantics. Werkzeug uses this for the
    ``Location`` header for redirects.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some
        reserved characters were left unquoted.

    .. versionchanged:: 0.9.6
       The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        # If we're not sure if it's safe to convert the URL, and it only
        # contains ASCII characters, return it unconverted.
        try:
            native_iri = to_native(iri)
            ascii_iri = native_iri.encode("ascii")
            # Only return if it doesn't have whitespace; otherwise fall
            # through so quoting can handle it.
            if len(ascii_iri.split()) == 1:
                return native_iri
        except UnicodeError:
            pass

    iri = url_parse(to_unicode(iri, charset, errors))
    # Reserved characters (``_to_uri_safe``) are left untouched.
    path = url_quote(iri.path, charset, errors, _to_uri_safe)
    query = url_quote(iri.query, charset, errors, _to_uri_safe)
    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
    return to_native(
        url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
    )
def url_decode(
    s,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
):
    """Parse a querystring and return it as :class:`MultiDict`. There is a
    difference in key decoding on different Python versions. On Python 3
    keys will always be fully decoded whereas on Python 2, keys will
    remain bytestrings if they fit into ASCII. On 2.x keys can be forced
    to be unicode by setting `decode_keys` to `True`.

    If the charset is set to `None` no unicode decoding will happen and
    raw bytes will be returned.

    Per default a missing value for a key will default to an empty key. If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported. If you want to
       use ";" instead a different `separator` can be provided.
       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects. If set to `True`
                        then keys will be unicode in all cases. Otherwise,
                        they remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    # Coerce the separator to match the input's flavor (text vs. bytes).
    if isinstance(s, text_type) and not isinstance(separator, text_type):
        separator = separator.decode(charset or "ascii")
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or "ascii")
    if cls is None:
        from .datastructures import MultiDict

        cls = MultiDict
    pairs = _url_decode_impl(
        s.split(separator), charset, decode_keys, include_empty, errors
    )
    return cls(pairs)
def url_decode_stream(
    stream,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
    limit=None,
    return_iterator=False,
):
    """Works like :func:`url_decode` but decodes a stream. The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    .. versionadded:: 0.8

    :param stream: a stream with the encoded querystring
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects. If set to `True`,
                        keys will be unicode in all cases. Otherwise, they
                        remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param limit: the content length of the URL data. Not necessary if
                  a limited stream is provided.
    :param return_iterator: if set to `True` the `cls` argument is ignored
                            and an iterator over all decoded pairs is
                            returned
    """
    from .wsgi import make_chunk_iter

    chunks = make_chunk_iter(stream, separator, limit)
    pairs = _url_decode_impl(chunks, charset, decode_keys, include_empty, errors)
    # The caller may consume the pairs lazily instead of materializing.
    if return_iterator:
        return pairs
    if cls is not None:
        return cls(pairs)
    from .datastructures import MultiDict

    return MultiDict(pairs)
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
    """Yield decoded ``(key, value)`` pairs from raw querystring chunks."""
    for raw in pair_iter:
        if not raw:
            continue
        s = make_literal_wrapper(raw)
        eq = s("=")
        if eq not in raw:
            # A bare key with no "=" maps to an empty value, unless such
            # entries are excluded entirely.
            if not include_empty:
                continue
            key, value = raw, s("")
        else:
            key, value = raw.split(eq, 1)
        key = url_unquote_plus(key, charset, errors)
        # On Python 2, ASCII-only keys stay native ``str`` by default.
        if charset is not None and PY2 and not decode_keys:
            key = try_coerce_native(key)
        yield key, url_unquote_plus(value, charset, errors)
def url_encode(
    obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
):
    """URL encode a dict/`MultiDict`. If a value is `None` it will not appear
    in the result string. Per default only values are encoded into the target
    charset strings. If `encode_keys` is set to ``True`` unicode keys are
    supported too.

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
                check out the :func:`sorted` documentation.
    """
    sep = to_native(separator, "ascii")
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    return sep.join(pieces)
def url_encode_stream(
    obj,
    stream=None,
    charset="utf-8",
    encode_keys=False,
    sort=False,
    key=None,
    separator=b"&",
):
    """Like :meth:`url_encode` but writes the results to a stream
    object. If the stream is `None` a generator over all encoded
    pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into or `None` if
                   an iterator over the encoded pairs should be returned. In
                   that case the separator argument is ignored.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
                check out the :func:`sorted` documentation.
    """
    sep = to_native(separator, "ascii")
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return pieces
    # Interleave the separator between pieces while writing.
    first = True
    for piece in pieces:
        if not first:
            stream.write(sep)
        stream.write(piece)
        first = False
def url_join(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)

    # Coerce both operands to one string flavor (text vs. bytes).
    base, url = normalize_string_tuple((base, url))
    s = make_literal_wrapper(base)

    if not base:
        return url
    if not url:
        return base

    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
        base, allow_fragments=allow_fragments
    )
    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
    # A different scheme, or an explicit netloc, makes ``url`` absolute.
    if scheme != bscheme:
        return url
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc

    if path[:1] == s("/"):
        segments = path.split(s("/"))
    elif not path:
        # A reference with no path keeps the base path (and the base
        # query, unless the reference supplies its own).
        segments = bpath.split(s("/"))
        if not query:
            query = bquery
    else:
        # Relative path: replace the last segment of the base path.
        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))

    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s("."):
        segments[-1] = s("")

    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s(".")]
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
                del segments[i - 1 : i + 1]
                break
            i += 1
        else:
            break

    # Remove trailing ".." if the URL is absolute
    unwanted_marker = [s(""), s("..")]
    while segments[:2] == unwanted_marker:
        del segments[1]

    path = s("/").join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which than are used to assemble the URL. Works with URLs
    and posix paths.

    Positional arguments are appended as individual segments to
    the path of the URL:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    If any of the arguments (positional or keyword) evaluates to `None` it
    will be skipped. If no keyword arguments are given the last argument
    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
    otherwise the keyword arguments are used for the query parameters, cutting
    off the first trailing underscore of the parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Combining of both methods is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Accessing attributes on the href object creates a new href object with
    the attribute name as prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
        `sort` and `key` were added.
    """

    def __init__(self, base="./", charset="utf-8", sort=False, key=None):
        if not base:
            base = "./"
        self.base = base
        self.charset = charset
        # ``sort``/``key`` control query parameter ordering, mirroring the
        # corresponding :func:`url_encode` arguments.
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        # Dunder lookups (e.g. copy/pickle protocol probes) must raise
        # normally rather than produce a child Href.
        if name[:2] == "__":
            raise AttributeError(name)
        base = self.base
        if base[-1:] != "/":
            base += "/"
        return Href(url_join(base, name), self.charset, self.sort, self.key)

    def __call__(self, *path, **query):
        # A trailing dict positional argument supplies the query; mixing
        # it with keyword arguments is ambiguous and rejected.
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError("keyword arguments and query-dicts can't be combined")
            query, path = path[-1], path[:-1]
        elif query:
            # Strip one trailing underscore so reserved words can be used
            # as parameter names (e.g. ``is_`` -> ``is``).
            query = dict(
                [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()]
            )
        # ``None`` segments are skipped; the rest are quoted and joined.
        path = "/".join(
            [
                to_unicode(url_quote(x, self.charset), "ascii")
                for x in path
                if x is not None
            ]
        ).lstrip("/")
        rv = self.base
        if path:
            if not rv.endswith("/"):
                rv += "/"
            rv = url_join(rv, "./" + path)
        if query:
            rv += "?" + to_unicode(
                url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
            )
        return to_native(rv)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wsgi.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import io
import re
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._compat import BytesIO
from ._compat import implements_iterator
from ._compat import make_literal_wrapper
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._compat import wsgi_get_bytes
from ._internal import _encode_idna
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
def responder(f):
    """Marks a function as responder. Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """

    def wrapper(*args):
        # Call ``f`` to obtain the WSGI application, then invoke it with
        # the trailing ``(environ, start_response)`` pair.
        return f(*args)(*args[-2:])

    return update_wrapper(wrapper, f)
def get_current_url(
    environ,
    root_only=False,
    strip_querystring=False,
    host_only=False,
    trusted_hosts=None,
):
    """A handy helper function that recreates the full URL as IRI for the
    current request or parts of it. Here's an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    This optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    Note that the string returned might contain unicode characters as the
    representation is an IRI not an URI. If you need an ASCII only
    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
    function:

    >>> from werkzeug.urls import iri_to_uri
    >>> iri_to_uri(get_current_url(env))
    'http://localhost/script/?param=foo'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    # Build the URL piecewise; ``get_host`` also performs the trusted-host
    # check and may raise ``SecurityError``.
    tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)]
    cat = tmp.append
    if host_only:
        return uri_to_iri("".join(tmp) + "/")
    # Script root without its trailing slash, then a single separator.
    cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/"))
    cat("/")
    if not root_only:
        cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/")))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                cat("?" + qs)
    return uri_to_iri("".join(tmp))
def host_is_trusted(hostname, trusted_list):
    """Checks if a host is trusted against a list. This also takes care
    of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against. If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False

    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(host):
        # Drop any port suffix before IDNA-encoding for comparison.
        if ":" in host:
            host = host.rsplit(":", 1)[0]
        return _encode_idna(host)

    try:
        hostname = _normalize(hostname)
    except UnicodeError:
        return False

    for ref in trusted_list:
        # A leading dot means "match this domain and all subdomains".
        suffix_match = ref.startswith(".")
        if suffix_match:
            ref = ref[1:]
        try:
            ref = _normalize(ref)
        except UnicodeError:
            return False
        if ref == hostname:
            return True
        if suffix_match and hostname.endswith(b"." + ref):
            return True

    return False
def get_host(environ, trusted_hosts=None):
    """Return the host for the given WSGI environment. This first checks
    the ``Host`` header. If it's not present, then ``SERVER_NAME`` and
    ``SERVER_PORT`` are used. The host will only contain the port if it
    is different than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param environ: The WSGI environment to get the host from.
    :param trusted_hosts: A list of trusted hosts.
    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    scheme = environ["wsgi.url_scheme"]
    if "HTTP_HOST" in environ:
        host = environ["HTTP_HOST"]
        # Strip a redundant default port from the Host header.
        if scheme == "http" and host.endswith(":80"):
            host = host[:-3]
        elif scheme == "https" and host.endswith(":443"):
            host = host[:-4]
    else:
        host = environ["SERVER_NAME"]
        port = environ["SERVER_PORT"]
        # Only append the port when it differs from the scheme default.
        if (scheme, port) not in (("https", "443"), ("http", "80")):
            host = host + ":" + port
    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        from .exceptions import SecurityError

        raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Returns the content length from the WSGI environment as
    integer. If it's not available or chunked transfer encoding is used,
    ``None`` is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    # Chunked requests carry no meaningful Content-Length.
    if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
        return None
    raw = environ.get("CONTENT_LENGTH")
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    # Negative values are clamped to zero.
    return length if length > 0 else 0
def get_input_stream(environ, safe_fallback=True):
    """Returns the input stream from the WSGI environment and wraps it
    in the most sensible way possible. The stream returned is not the
    raw WSGI stream in most cases but one that is safe to read from
    without taking into account the content length.

    If content length is not set, the stream will be empty for safety reasons.
    If the WSGI server supports chunked or infinite streams, it should set
    the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: use an empty stream as a safe fallback when the
        content length is not set. Disabling this allows infinite streams,
        which can be a denial-of-service risk.
    """
    stream = environ["wsgi.input"]
    content_length = get_content_length(environ)

    # The server promised the stream terminates on its own, so it is safe
    # to hand back unchanged.
    if environ.get("wsgi.input_terminated"):
        return stream

    # Without a content length the stream could be infinite, malicious or
    # not.  Fall back to an empty stream unless the caller opted out.
    if content_length is None:
        if safe_fallback:
            return BytesIO()
        return stream

    # Otherwise cap reads at the declared content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Return ``QUERY_STRING`` from the WSGI environ as a native string.

    Handles the WSGI decoding dance on Python 3 and restricts the
    result to ASCII characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw_qs = wsgi_get_bytes(environ.get("QUERY_STRING", ""))
    # QUERY_STRING should already be ASCII-safe, but some browsers
    # (notably old IE) send unicode anyway — quote it to force ASCII.
    quoted = url_quote(raw_qs, safe=":&%=+$!*'(),")
    return try_coerce_native(quoted)
def get_path_info(environ, charset="utf-8", errors="replace"):
    """Return a decoded ``PATH_INFO`` from the WSGI environ.

    Takes care of the WSGI decoding dance on Python 3.  If `charset`
    is ``None`` the raw bytestring is returned instead.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
        decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get("PATH_INFO", ""))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset="utf-8", errors="replace"):
    """Return a decoded ``SCRIPT_NAME`` from the WSGI environ.

    Takes care of the WSGI decoding dance on Python 3.  If `charset`
    is ``None`` the raw bytestring is returned instead.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
        decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_name = wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))
    return to_unicode(raw_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset="utf-8", errors="replace"):
    """Remove and return the next ``PATH_INFO`` segment, pushing it
    onto ``SCRIPT_NAME``.  Returns `None` if ``PATH_INFO`` is empty.

    If `charset` is ``None`` a bytestring is returned.  Empty segments
    (``'/foo//bar'``) are skipped but their slashes are still pushed
    to ``SCRIPT_NAME``:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    remaining = environ.get("PATH_INFO")

    if not remaining:
        return None

    script_name = environ.get("SCRIPT_NAME", "")

    # Transfer any run of leading slashes onto SCRIPT_NAME so empty
    # segments are preserved there.
    stripped = remaining.lstrip("/")
    slash_count = len(remaining) - len(stripped)

    if slash_count:
        script_name += "/" * slash_count

    if "/" in stripped:
        segment, rest = stripped.split("/", 1)
        environ["PATH_INFO"] = "/" + rest
    else:
        # Last segment: PATH_INFO is exhausted.
        segment = stripped
        environ["PATH_INFO"] = ""

    environ["SCRIPT_NAME"] = script_name + segment
    return to_unicode(
        wsgi_get_bytes(segment), charset, errors, allow_none_charset=True
    )
def peek_path_info(environ, charset="utf-8", errors="replace"):
    """Return the next ``PATH_INFO`` segment, or `None` if there is
    none.  Works like :func:`pop_path_info` but leaves the environment
    unmodified:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If `charset` is ``None`` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    parts = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)

    if not parts:
        return None

    return to_unicode(
        wsgi_get_bytes(parts[0]), charset, errors, allow_none_charset=True
    )
def extract_path_info(
    environ_or_baseurl,
    path_or_url,
    charset="utf-8",
    errors="werkzeug.url_quote",
    collapse_http_schemes=True,
):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.

    .. versionchanged:: 0.15
        The ``errors`` parameter defaults to leaving invalid bytes
        quoted instead of replacing them.

    .. versionadded:: 0.6
    """

    def _normalize_netloc(scheme, netloc):
        # Strip any userinfo (``user:pass@``) and drop the port when it
        # is the scheme's default, so netlocs compare equal regardless
        # of how the URL was spelled.
        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u"http" and port == u"80") or (
                scheme == u"https" and port == u"443"
            ):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u":" + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # Resolve the candidate against the base so relative paths work.
    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u"http", u"https"):
                return None
    else:
        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u"/")
    if not cur_path.startswith(base_path):
        return None

    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
@implements_iterator
class ClosingIterator(object):
    """The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of the iterable returned by the
    application.  Because it is useful to add another close action to a
    returned iterable and adding a custom iterable is a boring task this
    class can be used for that::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the
    list.

    A closing iterator is not needed if the application uses response
    objects and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        # Bind ``next`` of the underlying iterator once.
        self._next = partial(next, iter(iterable))

        # Normalize ``callbacks`` into a fresh list.
        if callbacks is None:
            normalized = []
        elif callable(callbacks):
            normalized = [callbacks]
        else:
            normalized = list(callbacks)

        # The wrapped iterable's own close runs first, per the WSGI spec.
        close = getattr(iterable, "close", None)

        if close:
            normalized.insert(0, close)

        self._callbacks = normalized

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        # Invoke every registered cleanup callback in order.
        for cb in self._callbacks:
            cb()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object for use as a WSGI response body.

    Uses the WSGI server's native file wrapper when the server
    advertises one, falling back to the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged.  If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to
    `True`.

    More information about file wrappers are available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper_cls = environ.get("wsgi.file_wrapper", FileWrapper)
    return wrapper_cls(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Convert a :class:`file`-like object into an iterable yielding
    blocks of at most `buffer_size` bytes until the file is exhausted.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse`
    you have to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # Delegate to the wrapped file when it supports closing.
        close = getattr(self.file, "close", None)
        if close is not None:
            close()

    def seekable(self):
        # Prefer the file's own answer; otherwise a ``seek`` method is
        # taken as evidence of seekability.
        if hasattr(self.file, "seekable"):
            return self.file.seekable()
        return True if hasattr(self.file, "seek") else False

    def seek(self, *args):
        seek = getattr(self.file, "seek", None)
        if seek is not None:
            seek(*args)

    def tell(self):
        tell = getattr(self.file, "tell", None)
        if tell is not None:
            return tell()
        return None

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
@implements_iterator
class _RangeWrapper(object):
    # private for now, but should we make it public in the future ?
    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.

    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.

    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """

    def __init__(self, iterable, start_byte=0, byte_range=None):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        # Absolute end offset, or None for "until the stream ends".
        self.end_byte = None
        if byte_range is not None:
            self.end_byte = self.start_byte + self.byte_range
        # Total bytes consumed from the underlying iterable so far.
        self.read_length = 0
        # Seekable sources can jump straight to start_byte; others must
        # be consumed and discarded up to it.
        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
        self.end_reached = False

    def __iter__(self):
        return self

    def _next_chunk(self):
        # Pull the next chunk and account for its length; remember when
        # the underlying iterable is exhausted.
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise

    def _first_iteration(self):
        # Position the stream at start_byte. Returns the first partial
        # chunk (or None) plus the offset to measure end_byte against.
        chunk = None
        if self.seekable:
            self.iterable.seek(self.start_byte)
            self.read_length = self.iterable.tell()
            contextual_read_length = self.read_length
        else:
            # Discard whole chunks until start_byte falls inside one,
            # then trim that chunk's leading bytes.
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                chunk = chunk[self.start_byte - self.read_length :]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length

    def _next(self):
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            # Final chunk: trim anything past the requested range.
            self.end_reached = True
            return chunk[: self.end_byte - contextual_read_length]
        return chunk

    def __next__(self):
        chunk = self._next()
        if chunk:
            return chunk
        # An empty trimmed chunk means the range is done.
        self.end_reached = True
        raise StopIteration()

    def close(self):
        if hasattr(self.iterable, "close"):
            self.iterable.close()
def _make_chunk_iter(stream, limit, buffer_size):
    """Common chunk generator shared by the line and chunk iterators.

    Accepts either an iterable of chunks or a readable stream and
    yields non-empty chunks of at most `buffer_size` bytes.
    """
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError(
            "Passed a string or byte object instead of true iterator or stream."
        )

    # Already an iterable of chunks: pass non-empty items through.
    if not hasattr(stream, "read"):
        for chunk in stream:
            if chunk:
                yield chunk
        return

    # Cap a raw stream at ``limit`` unless it is already limited.
    if limit is not None and not isinstance(stream, LimitedStream):
        stream = LimitedStream(stream, limit)

    read = stream.read

    while True:
        chunk = read(buffer_size)

        if not chunk:
            return

        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first chunk to learn whether we deal with text or
    # bytes; an empty stream yields nothing at all.
    first_item = next(_iter, "")
    if not first_item:
        return

    s = make_literal_wrapper(first_item)
    empty = s("")
    cr = s("\r")
    lf = s("\n")
    crlf = s("\r\n")

    # Put the peeked chunk back in front of the iterator.
    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        # Yield lines as delimited by splitlines(); a line that spans a
        # chunk boundary is buffered until its terminator arrives.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, "")
            if not new_data:
                break
            new_buf = []
            buf_size = 0
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                buf_size += len(item)
                if item and item[-1:] in crlf:
                    # Complete line terminator seen: emit the line.
                    yield _join(new_buf)
                    new_buf = []
                elif cap_at_buffer and buf_size >= buffer_size:
                    # Overlong line: emit buffer_size-sized pieces.
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(
    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first chunk to decide between text and bytes mode;
    # an empty stream yields nothing.
    first_item = next(_iter, "")
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # Capturing group keeps the separators in the split output so
        # they can be detected below.
        _split = re.compile(r"(%s)" % re.escape(separator)).split
        _join = u"".join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b"(" + re.escape(separator) + b")").split
        _join = b"".join

    buffer = []
    while 1:
        new_data = next(_iter, "")
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                # Separator seen: flush everything buffered before it.
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)

                if cap_at_buffer and buf_size >= buffer_size:
                    # Overlong chunk: emit buffer_size-sized pieces.
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)

        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(io.IOBase):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Only the bound read methods are kept; the raw stream itself
        # is never exposed.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes handed out so far.
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from .exceptions import ClientDisconnected

        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        # A short read means the client went away before sending the
        # announced number of bytes.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        # An empty result despite a positive size means the client
        # disconnected mid-body.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # Shrink the remaining budget by what the last
                # readline consumed.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line

    def readable(self):
        return True
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/testapp.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import base64
import os
import sys
from textwrap import wrap
from . import __version__ as _werkzeug_version
from .utils import escape
from .wrappers import BaseRequest as Request
from .wrappers import BaseResponse as Response
logo = Response(
base64.b64decode(
"""
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
="""
),
mimetype="image/png",
)
TEMPLATE = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
"""
def iter_sys_path():
    """Yield ``(display_path, is_virtual, was_expanded)`` for every
    entry on ``sys.path``.

    ``display_path`` abbreviates the home directory as ``~`` on POSIX,
    ``is_virtual`` flags entries that are not real directories (e.g.
    zip-importer paths), and ``was_expanded`` flags entries that had to
    be resolved from a relative path.
    """
    if os.name == "posix":

        def shorten(p):
            # Abbreviate the home-directory prefix for display.
            home = os.path.expanduser("~")
            return "~" + p[len(home) :] if p.startswith(home) else p

    else:

        def shorten(p):
            return p

    cwd = os.path.abspath(os.getcwd())

    for entry in sys.path:
        resolved = os.path.join(cwd, entry or os.path.curdir)
        yield (
            shorten(os.path.normpath(resolved)),
            not os.path.isdir(resolved),
            resolved != entry,
        )
def render_testapp(req):
    """Render the HTML diagnostics page for *req* as UTF-8 bytes."""
    # Installed distributions, via pkg_resources when available.
    try:
        import pkg_resources
    except ImportError:
        eggs = ()
    else:
        eggs = sorted(
            pkg_resources.working_set, key=lambda d: d.project_name.lower()
        )

    python_eggs = []
    for dist in eggs:
        try:
            version = dist.version
        except (ValueError, AttributeError):
            version = "unknown"
        python_eggs.append(
            "<li>%s <small>[%s]</small>" % (escape(dist.project_name), escape(version))
        )

    # WSGI environ rendered as escaped table rows, sorted by key.
    wsgi_env = []
    for key, value in sorted(req.environ.items(), key=lambda kv: repr(kv[0]).lower()):
        wsgi_env.append(
            "<tr><th>%s<td><code>%s</code>"
            % (escape(str(key)), " ".join(wrap(escape(repr(value)))))
        )

    # sys.path entries with CSS classes marking virtual/expanded items.
    sys_path = []
    for item, virtual, expanded in iter_sys_path():
        classes = []
        if virtual:
            classes.append("virtual")
        if expanded:
            classes.append("exp")
        attr = ' class="%s"' % " ".join(classes) if classes else ""
        sys_path.append("<li%s>%s" % (attr, escape(item)))

    context = {
        "python_version": "<br>".join(escape(sys.version).splitlines()),
        "platform": escape(sys.platform),
        "os": escape(os.name),
        "api_version": sys.api_version,
        "byteorder": sys.byteorder,
        "werkzeug_version": _werkzeug_version,
        "python_eggs": "\n".join(python_eggs),
        "wsgi_env": "\n".join(wsgi_env),
        "sys_path": "\n".join(sys_path),
    }
    return (TEMPLATE % context).encode("utf-8")
def test_app(environ, start_response):
    """Simple test application that dumps the environment.  You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    req = Request(environ, populate_request=False)

    # ``?resource=logo`` serves the embedded PNG; anything else gets
    # the rendered diagnostics page.
    if req.args.get("resource") == "logo":
        resp = logo
    else:
        resp = Response(render_testapp(req), mimetype="text/html")

    return resp(environ, start_response)
if __name__ == "__main__":
    # Ad-hoc entry point: serve the diagnostics app locally with the
    # auto-reloader enabled.
    from .serving import run_simple

    run_simple("localhost", 5000, test_app, use_reloader=True)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/dispatcher.py
|
"""
Application Dispatcher
======================
This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.
A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".
This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::
app = DispatcherMiddleware(serve_frontend, {
'/api': api_app,
'/admin': admin_app,
})
In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.
.. autoclass:: DispatcherMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
class DispatcherMiddleware(object):
    """Combine multiple applications as a single WSGI application.

    The request path is matched against the mounted prefixes, longest
    prefix first; the matched prefix is moved from ``PATH_INFO`` onto
    ``SCRIPT_NAME`` before the selected application is invoked.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get("PATH_INFO", "")
        path_info = ""

        # Peel segments off the end of the path until a mounted prefix
        # matches; whatever was peeled off becomes the new PATH_INFO.
        while "/" in script:
            if script in self.mounts:
                app = self.mounts[script]
                break

            script, tail = script.rsplit("/", 1)
            path_info = "/%s%s" % (tail, path_info)
        else:
            # No mounted prefix matched: fall back to the default app
            # (or an exact non-slash mount such as "").
            app = self.mounts.get(script, self.app)

        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + script
        environ["PATH_INFO"] = path_info
        return app(environ, start_response)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/proxy_fix.py
|
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be applied if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from werkzeug.http import parse_list_header
class ProxyFix(object):
    """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
    front of the application may set.

    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.

    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.

    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.

    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.

    .. code-block:: python

        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)

    .. versionchanged:: 1.0
        Deprecated code has been removed:

        * The ``num_proxies`` argument and attribute.
        * The ``get_remote_addr`` method.
        * The environ keys ``orig_remote_addr``,
          ``orig_wsgi_url_scheme``, and ``orig_http_host``.

    .. versionchanged:: 0.15
        All headers support multiple values. The ``num_proxies``
        argument is deprecated. Each header is configured with a
        separate number of trusted proxies.

    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the
        ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
        ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
        and will be removed in 1.0.

    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.

    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """

    def __init__(self, app, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0):
        self.app = app
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix

    def _get_real_value(self, trusted, value):
        """Get the real value from a list header based on the configured
        number of trusted proxies.

        :param trusted: Number of values to trust in the header.
        :param value: Comma separated list header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.

        .. versionchanged:: 1.0
            Renamed from ``_get_trusted_comma``.

        .. versionadded:: 0.15
        """
        # Nothing to trust if the header is disabled (0) or absent.
        if not (trusted and value):
            return
        values = parse_list_header(value)
        # Each proxy appends one value; counting from the right, index
        # ``-trusted`` is the value set by the outermost trusted proxy.
        if len(values) >= trusted:
            return values[-trusted]

    def __call__(self, environ, start_response):
        """Modify the WSGI environ based on the various ``Forwarded``
        headers before calling the wrapped application. Store the
        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
        """
        environ_get = environ.get
        orig_remote_addr = environ_get("REMOTE_ADDR")
        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
        orig_http_host = environ_get("HTTP_HOST")
        # Preserve the untrusted originals so the application can still
        # inspect them if needed.
        environ.update(
            {
                "werkzeug.proxy_fix.orig": {
                    "REMOTE_ADDR": orig_remote_addr,
                    "wsgi.url_scheme": orig_wsgi_url_scheme,
                    "HTTP_HOST": orig_http_host,
                    "SERVER_NAME": environ_get("SERVER_NAME"),
                    "SERVER_PORT": environ_get("SERVER_PORT"),
                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
                }
            }
        )

        x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
        if x_for:
            environ["REMOTE_ADDR"] = x_for

        x_proto = self._get_real_value(
            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
        )
        if x_proto:
            environ["wsgi.url_scheme"] = x_proto

        x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST"))
        if x_host:
            environ["HTTP_HOST"] = x_host
            # The forwarded host may carry an explicit port.
            parts = x_host.split(":", 1)
            environ["SERVER_NAME"] = parts[0]
            if len(parts) == 2:
                environ["SERVER_PORT"] = parts[1]

        x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT"))
        if x_port:
            host = environ.get("HTTP_HOST")
            if host:
                # Strip any port already present on the host (possibly
                # set by X-Forwarded-Host just above); X-Forwarded-Port
                # takes precedence.
                parts = host.split(":", 1)
                host = parts[0] if len(parts) == 2 else host
                environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
            environ["SERVER_PORT"] = x_port

        x_prefix = self._get_real_value(
            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
        )
        if x_prefix:
            environ["SCRIPT_NAME"] = x_prefix

        return self.app(environ, start_response)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/shared_data.py
|
"""
Serve Shared Static Files
=========================
.. autoclass:: SharedDataMiddleware
:members: is_allowed
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import mimetypes
import os
import pkgutil
import posixpath
from datetime import datetime
from io import BytesIO
from time import mktime
from time import time
from zlib import adler32
from .._compat import PY2
from .._compat import string_types
from ..filesystem import get_filesystem_encoding
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
from ..utils import get_content_type
from ..wsgi import get_path_info
from ..wsgi import wrap_file
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/static': os.path.join(os.path.dirname(__file__), 'static')
        })

    The contents of the folder ``./static`` will now be available on
    ``http://example.com/static/``. This is pretty useful during development
    because a standalone media server is not required. One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })

    This will then serve the ``static`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    :param fallback_mimetype: The fallback mimetype for unknown files.

    .. versionchanged:: 1.0
        The default ``fallback_mimetype`` is
        ``application/octet-stream``. If a filename looks like a text
        mimetype, the ``utf-8`` charset is added to it.

    .. versionadded:: 0.6
        Added ``fallback_mimetype``.

    .. versionchanged:: 0.5
        Added ``cache_timeout``.
    """

    def __init__(
        self,
        app,
        exports,
        disallow=None,
        cache=True,
        cache_timeout=60 * 60 * 12,
        fallback_mimetype="application/octet-stream",
    ):
        self.app = app
        self.exports = []
        self.cache = cache
        self.cache_timeout = cache_timeout

        # ``exports`` may be a dict or an iterable of (prefix, target)
        # pairs; normalize to pairs.
        if hasattr(exports, "items"):
            exports = exports.items()

        for key, value in exports:
            if isinstance(value, tuple):
                # (package, path): serve from package data.
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    # A single exported file.
                    loader = self.get_file_loader(value)
                else:
                    # A directory tree on the filesystem.
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError("unknown def %r" % value)

            self.exports.append((key, loader))

        if disallow is not None:
            from fnmatch import fnmatch

            # Shadow the instance's ``is_allowed`` with the pattern
            # check; see the method's docstring below.
            self.is_allowed = lambda x: not fnmatch(x, disallow)

        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files. However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # Zero-argument callable producing
        # (open binary file, mtime as naive UTC datetime, size in bytes).
        return lambda: (
            open(filename, "rb"),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename)),
        )

    def get_file_loader(self, filename):
        # For single-file exports the requested sub-path is ignored.
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        # Resources inside archives have no reliable mtime; fall back to
        # the time this loader was created.
        loadtime = datetime.utcnow()
        provider = pkgutil.get_loader(package)

        if hasattr(provider, "get_resource_reader"):
            # Python 3
            reader = provider.get_resource_reader(package)

            def loader(path):
                if path is None:
                    return None, None

                path = safe_join(package_path, path)
                basename = posixpath.basename(path)

                try:
                    resource = reader.open_resource(path)
                except IOError:
                    return None, None

                if isinstance(resource, BytesIO):
                    # In-memory resource (e.g. zipimport): no file stats
                    # available, use the load time instead.
                    return (
                        basename,
                        lambda: (resource, loadtime, len(resource.getvalue())),
                    )

                return (
                    basename,
                    lambda: (
                        resource,
                        datetime.utcfromtimestamp(os.path.getmtime(resource.name)),
                        os.path.getsize(resource.name),
                    ),
                )

        else:
            # Python 2
            package_filename = provider.get_filename(package)
            is_filesystem = os.path.exists(package_filename)
            root = os.path.join(os.path.dirname(package_filename), package_path)

            def loader(path):
                if path is None:
                    return None, None

                path = safe_join(root, path)
                basename = posixpath.basename(path)

                if is_filesystem:
                    if not os.path.isfile(path):
                        return None, None

                    return basename, self._opener(path)

                # Archive-based package: read the bytes via the loader.
                try:
                    data = provider.get_data(path)
                except IOError:
                    return None, None

                return basename, lambda: (BytesIO(data), loadtime, len(data))

        return loader

    def get_directory_loader(self, directory):
        def loader(path):
            # ``path is None`` means the mount point itself was
            # requested; serve the directory path directly (only
            # matches if it happens to be a file).
            if path is not None:
                path = safe_join(directory, path)
            else:
                path = directory

            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)

            return None, None

        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        # Cheap etag derived from mtime + size + filename checksum.
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(get_filesystem_encoding())

        return "wzsdm-%d-%s-%s" % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xFFFFFFFF,
        )

    def __call__(self, environ, start_response):
        path = get_path_info(environ)

        if PY2:
            path = path.encode(get_filesystem_encoding())

        file_loader = None

        for search_path, loader in self.exports:
            if search_path == path:
                # Exact hit on the mount point itself.
                real_filename, file_loader = loader(None)

                if file_loader is not None:
                    break

            if not search_path.endswith("/"):
                search_path += "/"

            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])

                if file_loader is not None:
                    break

        if file_loader is None or not self.is_allowed(real_filename):
            # Not a shared file (or access blocked): fall through to
            # the wrapped application.
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
        f, mtime, file_size = file_loader()

        headers = [("Date", http_date())]

        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ("Etag", '"%s"' % etag),
                ("Cache-Control", "max-age=%d, public" % timeout),
            ]

            if not is_resource_modified(environ, etag, last_modified=mtime):
                # Conditional request matched; no body is sent.
                f.close()
                start_response("304 Not Modified", headers)
                return []

            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))

        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        return wrap_file(environ, f)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/http_proxy.py
|
"""
Basic HTTP Proxy
================
.. autoclass:: ProxyMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import socket
from ..datastructures import EnvironHeaders
from ..http import is_hop_by_hop_header
from ..urls import url_parse
from ..urls import url_quote
from ..wsgi import get_input_stream
try:
from http import client
except ImportError:
import httplib as client
class ProxyMiddleware(object):
    """Proxy requests under a path to an external server, routing other
    requests to the app.

    This middleware can only proxy HTTP requests, as that is the only
    protocol handled by the WSGI server. Other protocols, such as
    websocket requests, cannot be proxied at this layer. This should
    only be used for development, in production a real proxying server
    should be used.

    The middleware takes a dict that maps a path prefix to a dict
    describing the host to be proxied to::

        app = ProxyMiddleware(app, {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
            }
        })

    Each host has the following options:

    ``target``:
        The target URL to dispatch to. This is required.
    ``remove_prefix``:
        Whether to remove the prefix from the URL before dispatching it
        to the target. The default is ``False``.
    ``host``:
        ``"<auto>"`` (default):
            The host header is automatically rewritten to the URL of the
            target.
        ``None``:
            The host header is unmodified from the client request.
        Any other value:
            The host header is overwritten with the value.
    ``headers``:
        A dictionary of headers to be sent with the request to the
        target. The default is ``{}``.
    ``ssl_context``:
        A :class:`ssl.SSLContext` defining how to verify requests if the
        target is HTTPS. The default is ``None``.

    In the example above, everything under ``"/static/"`` is proxied to
    the server on port 5001. The host header is rewritten to the target,
    and the ``"/static/"`` prefix is removed from the URLs.

    :param app: The WSGI application to wrap.
    :param targets: Proxy target configurations. See description above.
    :param chunk_size: Size of chunks to read from input stream and
        write to target.
    :param timeout: Seconds before an operation to a target fails.

    .. versionadded:: 0.14
    """

    def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
        def _set_defaults(opts):
            # Fill in every optional key so the rest of the code can
            # index the dict unconditionally.
            opts.setdefault("remove_prefix", False)
            opts.setdefault("host", "<auto>")
            opts.setdefault("headers", {})
            opts.setdefault("ssl_context", None)
            return opts

        self.app = app
        # Normalize all prefixes to the canonical "/prefix/" form.
        self.targets = dict(
            ("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items()
        )
        self.chunk_size = chunk_size
        self.timeout = timeout

    def proxy_to(self, opts, path, prefix):
        """Return a WSGI application that forwards the request for
        ``path`` to the target configured in ``opts``."""
        target = url_parse(opts["target"])

        def application(environ, start_response):
            # Copy the incoming headers, dropping hop-by-hop headers
            # and the ones this proxy computes itself.
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))

            # Pick the outgoing Host header per the ``host`` option.
            if opts["host"] == "<auto>":
                headers.append(("Host", target.ascii_host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))

            headers.extend(opts["headers"].items())
            remote_path = path

            if opts["remove_prefix"]:
                # Replace the mounted prefix with the target's path.
                remote_path = "%s/%s" % (
                    target.path.rstrip("/"),
                    remote_path[len(prefix) :].lstrip("/"),
                )

            content_length = environ.get("CONTENT_LENGTH")
            chunked = False

            if content_length not in ("", None):
                headers.append(("Content-Length", content_length))
            elif content_length is not None:
                # Empty string: body present but length unknown, so
                # stream it with chunked transfer encoding.
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True

            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        target.ascii_host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        target.ascii_host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got '{}'.".format(
                            target.scheme
                        )
                    )

                con.connect()
                remote_url = url_quote(remote_path)
                querystring = environ["QUERY_STRING"]

                if querystring:
                    remote_url = remote_url + "?" + querystring

                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)

                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"

                    con.putheader(k, v)

                con.endheaders()
                stream = get_input_stream(environ)

                # Stream the request body to the target in chunks.
                while 1:
                    data = stream.read(self.chunk_size)

                    if not data:
                        break

                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)

                resp = con.getresponse()
            except socket.error:
                from ..exceptions import BadGateway

                # Any network failure towards the target becomes a 502.
                return BadGateway()(environ, start_response)

            start_response(
                "%d %s" % (resp.status, resp.reason),
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )

            def read():
                # Stream the response body back to the client.
                while 1:
                    try:
                        data = resp.read(self.chunk_size)
                    except socket.error:
                        break

                    if not data:
                        break

                    yield data

            return read()

        return application

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"]
        app = self.app

        for prefix, opts in self.targets.items():
            if path.startswith(prefix):
                # Build a per-request proxy application for the first
                # matching target.
                app = self.proxy_to(opts, path, prefix)
                break

        return app(environ, start_response)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/__init__.py
|
"""
Middleware
==========
A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.
.. toctree::
:maxdepth: 1
proxy_fix
shared_data
dispatcher
http_proxy
lint
profiler
The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/profiler.py
|
"""
Application Profiler
====================
This module provides a middleware that profiles each request with the
:mod:`cProfile` module. This can help identify bottlenecks in your code
that may be slowing down your application.
.. autoclass:: ProfilerMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function
import os.path
import sys
import time
from pstats import Stats
try:
from cProfile import Profile
except ImportError:
from profile import Profile
class ProfilerMiddleware(object):
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.

    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.

    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:

    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request.
    - ``{time}`` - The time of the request.

    If it is a callable, it will be called with the WSGI ``environ``
    dict and should return a filename.

    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.

    .. code-block:: python

        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disabled by passing ``stream=None``.

    .. versionadded:: 0.15
        Added ``filename_format``.

    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """

    def __init__(
        self,
        app,
        stream=sys.stdout,
        sort_by=("time", "calls"),
        restrictions=(),
        profile_dir=None,
        filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
    ):
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format

    def __call__(self, environ, start_response):
        chunks = []

        def buffering_start_response(status, headers, exc_info=None):
            # Pass through to the real server, but hand the app a write
            # callable that buffers instead of sending immediately.
            start_response(status, headers, exc_info)
            return chunks.append

        def invoke_app():
            app_iter = self._app(environ, buffering_start_response)
            chunks.extend(app_iter)

            if hasattr(app_iter, "close"):
                app_iter.close()

        profiler = Profile()
        started = time.time()
        # Run the whole request under the profiler so the stats cover
        # iteration and cleanup as well.
        profiler.runcall(invoke_app)
        body = b"".join(chunks)
        duration = time.time() - started

        if self._profile_dir is not None:
            if callable(self._filename_format):
                profile_name = self._filename_format(environ)
            else:
                profile_name = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=(
                        environ.get("PATH_INFO").strip("/").replace("/", ".") or "root"
                    ),
                    elapsed=duration * 1000.0,
                    time=time.time(),
                )

            profiler.dump_stats(os.path.join(self._profile_dir, profile_name))

        if self._stream is not None:
            stats = Stats(profiler, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream)
            stats.print_stats(*self._restrictions)
            print("-" * 80 + "\n", file=self._stream)

        # The buffered body is returned as a single chunk.
        return [body]
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/middleware/lint.py
|
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from warnings import warn
from .._compat import implements_iterator
from .._compat import PY2
from .._compat import string_types
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class WSGIWarning(Warning):
    """Warning class for WSGI warnings."""


class HTTPWarning(Warning):
    """Warning class for HTTP warnings."""


def check_string(context, obj, stacklevel=3):
    """Emit a :class:`WSGIWarning` unless ``obj`` is exactly a ``str``."""
    if type(obj) is str:
        return

    warn(
        "'%s' requires strings, got '%s'" % (context, type(obj).__name__),
        WSGIWarning,
    )
class InputStream(object):
    """Proxy around ``wsgi.input`` that warns about usage patterns the
    WSGI spec does not guarantee to be safe."""

    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        if not args:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )

        return self._stream.read(*args)

    def readline(self, *args):
        if not args:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) == 1:
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")

        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            # Degrade to an empty iterator instead of crashing.
            return iter(())

    def close(self):
        # Per the spec the application must not close the input stream.
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Proxy around ``wsgi.errors`` that validates written values and
    warns if the application closes the error stream."""

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string("wsgi.error.write()", s)
        self._stream.write(s)

    def writelines(self, seq):
        # Funnel every line through ``write`` so each one is checked.
        for item in seq:
            self.write(item)

    def flush(self):
        self._stream.flush()

    def close(self):
        # Per the spec the application must not close the error stream.
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wraps the ``write`` callable returned by ``start_response`` so
    each chunk is validated and its length recorded.

    :param write: The ``write`` callable the server's ``start_response``
        returned.
    :param chunks: Shared list collecting chunk lengths, used later to
        verify the ``Content-Length`` header against bytes sent.
    """

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string("write()", s)
        # Per PEP 3333, ``write`` is a plain callable, not a file-like
        # object; the previous ``self._write.write(s)`` raised
        # AttributeError on any conforming server.
        self._write(s)
        self._chunks.append(len(s))
@implements_iterator
class GuardedIterator(object):
    """Wraps the application's response iterator to warn about protocol
    violations while it is consumed and to run HTTP sanity checks when
    it is closed."""

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator

        # Bind the appropriate next-method once, for both Python 2 and 3.
        if PY2:
            self._next = iter(iterator).next
        else:
            self._next = iter(iterator).__next__

        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def __next__(self):
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)

        rv = self._next()

        # start_response must have been called before the first item
        # is produced.
        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )

        check_string("application iterator items", rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True

        if hasattr(self._iterator, "close"):
            self._iterator.close()

        # With the full response known, verify status/header/body
        # consistency.
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)

            if status_code == 304:
                # 304 responses must carry no entity headers or body.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            "Entity header %r found in 304 response." % key, HTTPWarning
                        )
                if bytes_sent:
                    warn("304 responses must not have a body.", HTTPWarning)
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses must be empty.
                if content_length != 0:
                    warn(
                        "%r responses must have an empty content length." % status_code,
                        HTTPWarning,
                    )
                if bytes_sent:
                    warn(
                        "%r responses must not have a body." % status_code, HTTPWarning
                    )
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the client do not"
                    " match.",
                    WSGIWarning,
                )

    def __del__(self):
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.", WSGIWarning
                )
            except Exception:
                # Interpreter may be shutting down; warning machinery
                # can be gone already.
                pass
class LintMiddleware(object):
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:

    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators

    Error information is emitted using the :mod:`warnings` module.

    :param app: The WSGI application to wrap.

    .. code-block:: python

        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        """Warn about missing or malformed entries in the WSGI environ."""
        if type(environ) is not dict:
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    "Required environment key %r not found" % key,
                    WSGIWarning,
                    stacklevel=3,
                )
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)

        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")

        if script_name and script_name[0] != "/":
            warn(
                "'SCRIPT_NAME' does not start with a slash: %r" % script_name,
                WSGIWarning,
                stacklevel=3,
            )

        if path_info and path_info[0] != "/":
            warn(
                "'PATH_INFO' does not start with a slash: %r" % path_info,
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(self, status, headers, exc_info):
        """Validate the ``start_response`` arguments and return the
        parsed status code and a :class:`Headers` object."""
        check_string("status", status)
        status_code = status.split(None, 1)[0]

        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning("Status code must be three digits"), stacklevel=3)

        if len(status) < 4 or status[3] != " ":
            warn(
                WSGIWarning(
                    "Invalid value for status %r. Valid "
                    "status strings are three digits, a space "
                    "and a status explanation"
                ),
                stacklevel=3,
            )

        status_code = int(status_code)

        if status_code < 100:
            warn(WSGIWarning("status code < 100 detected"), stacklevel=3)

        if type(headers) is not list:
            warn(WSGIWarning("header list is not a list"), stacklevel=3)

        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning("Headers must tuple 2-item tuples"), stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning("header items must be strings"), stacklevel=3)
            if name.lower() == "status":
                warn(
                    WSGIWarning(
                        "The status header is not supported due to "
                        "conflicts with the CGI spec."
                    ),
                    stacklevel=3,
                )

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers):
        """Warn about questionable response header values."""
        etag = headers.get("etag")

        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        HTTPWarning("weak etag indicator should be upcase."),
                        stacklevel=4,
                    )

                etag = etag[2:]

            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)

        location = headers.get("location")

        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    HTTPWarning("absolute URLs required for location header"),
                    stacklevel=4,
                )

    def check_iterator(self, app_iter):
        """Warn if the application returned a bare string, which would
        be sent one character at a time."""
        if isinstance(app_iter, string_types):
            # Message previously read "returned astring"; fixed.
            warn(
                "The application returned a string. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)

        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )

        environ, start_response = args
        self.check_environ(environ)
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])

        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper

        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                warn(
                    "Invalid number of arguments: %s, expected 2 or 3." % len(args),
                    WSGIWarning,
                    stacklevel=2,
                )

            if kwargs:
                warn("'start_response' does not take keyword arguments.", WSGIWarning)

            status, headers = args[:2]

            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None

            headers_set[:] = self.check_start_response(status, headers, exc_info)
            return GuardedWrite(start_response(status, headers, exc_info), chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/auth.py
|
from ..http import parse_authorization_header
from ..http import parse_www_authenticate_header
from ..utils import cached_property
class AuthorizationMixin(object):
    """Request mixin that exposes the parsed ``Authorization`` header
    as a :class:`~werkzeug.datastructures.Authorization` object through
    the :attr:`authorization` property.
    """

    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        return parse_authorization_header(self.environ.get("HTTP_AUTHORIZATION"))
class WWWAuthenticateMixin(object):
    """Adds a :attr:`www_authenticate` property to a response object."""

    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""

        def on_update(www_auth):
            # Keep the raw header in sync with the parsed object: write
            # it back when non-empty, drop it when it becomes empty.
            if www_auth:
                self.headers["WWW-Authenticate"] = www_auth.to_header()
            elif "www-authenticate" in self.headers:
                del self.headers["www-authenticate"]

        return parse_www_authenticate_header(
            self.headers.get("www-authenticate"), on_update
        )
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/user_agent.py
|
from ..useragents import UserAgent
from ..utils import cached_property
class UserAgentMixin(object):
    """Request mixin providing :attr:`user_agent`: the user agent of the
    browser that triggered the request, parsed into a
    :class:`~werkzeug.useragents.UserAgent` object.
    """

    @cached_property
    def user_agent(self):
        """Parsed user agent for the current request (cached)."""
        agent = UserAgent(self.environ)
        return agent
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/cors.py
|
from ..http import dump_header
from ..http import parse_set_header
from ..utils import environ_property
from ..utils import header_property
class CORSRequestMixin(object):
    """A mixin for :class:`~werkzeug.wrappers.BaseRequest` subclasses
    that adds descriptors for Cross Origin Resource Sharing (CORS)
    headers.

    .. versionadded:: 1.0
    """

    # Raw ``Origin`` request header exposed from the WSGI environ.
    origin = environ_property(
        "HTTP_ORIGIN",
        doc=(
            "The host that the request originated from. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_origin` on"
            " the response to indicate which origins are allowed."
        ),
    )

    # Parsed with ``parse_set_header`` so the comma-separated header
    # value can be treated as a collection of header names.
    access_control_request_headers = environ_property(
        "HTTP_ACCESS_CONTROL_REQUEST_HEADERS",
        load_func=parse_set_header,
        doc=(
            "Sent with a preflight request to indicate which headers"
            " will be sent with the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_headers`"
            " on the response to indicate which headers are allowed."
        ),
    )

    # Plain string: the HTTP method the actual cross-origin request
    # intends to use.
    access_control_request_method = environ_property(
        "HTTP_ACCESS_CONTROL_REQUEST_METHOD",
        doc=(
            "Sent with a preflight request to indicate which method"
            " will be used for the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_methods`"
            " on the response to indicate which methods are allowed."
        ),
    )
class CORSResponseMixin(object):
    """A mixin for :class:`~werkzeug.wrappers.BaseResponse` subclasses
    that adds descriptors for Cross Origin Resource Sharing (CORS)
    headers.

    .. versionadded:: 1.0
    """

    @property
    def access_control_allow_credentials(self):
        """Whether credentials can be shared by the browser to
        JavaScript code. As part of the preflight request it indicates
        whether credentials can be used on the cross origin request.
        """
        # The header is a pure presence flag (its value is always
        # "true" when written by the setter below), so a membership
        # test is the whole check.
        return "Access-Control-Allow-Credentials" in self.headers

    @access_control_allow_credentials.setter
    def access_control_allow_credentials(self, value):
        # Only the literal ``True`` enables the header; any other
        # value -- including other truthy values -- removes it.
        if value is True:
            self.headers["Access-Control-Allow-Credentials"] = "true"
        else:
            self.headers.pop("Access-Control-Allow-Credentials", None)

    # The header_property descriptors below parse set-valued headers
    # with ``parse_set_header`` and serialize with ``dump_header``.
    access_control_allow_headers = header_property(
        "Access-Control-Allow-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be sent with the cross origin request.",
    )

    access_control_allow_methods = header_property(
        "Access-Control-Allow-Methods",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which methods can be used for the cross origin request.",
    )

    access_control_allow_origin = header_property(
        "Access-Control-Allow-Origin",
        doc="The origin or '*' for any origin that may make cross origin requests.",
    )

    access_control_expose_headers = header_property(
        "Access-Control-Expose-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be shared by the browser to JavaScript code.",
    )

    access_control_max_age = header_property(
        "Access-Control-Max-Age",
        load_func=int,
        dump_func=str,
        doc="The maximum age in seconds the access control settings can be cached for.",
    )
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/base_response.py
|
import warnings
from .._compat import integer_types
from .._compat import string_types
from .._compat import text_type
from .._compat import to_bytes
from .._compat import to_native
from ..datastructures import Headers
from ..http import dump_cookie
from ..http import HTTP_STATUS_CODES
from ..http import remove_entity_headers
from ..urls import iri_to_uri
from ..urls import url_join
from ..utils import get_content_type
from ..wsgi import ClosingIterator
from ..wsgi import get_current_url
def _run_wsgi_app(*args):
    """This function replaces itself to ensure that the test module is not
    imported unless required. DO NOT USE!
    """
    # The first call rebinds the module-level name ``_run_wsgi_app`` to
    # the real implementation in ``werkzeug.test``; subsequent calls go
    # straight to that function and skip the import entirely.
    global _run_wsgi_app
    from ..test import run_wsgi_app as _run_wsgi_app

    return _run_wsgi_app(*args)
def _warn_if_string(iterable):
    """Warn when a response iterable is actually a string.

    Iterating a string yields one character per chunk, so a WSGI server
    would send the body a character at a time -- almost never what the
    caller intended.
    """
    if not isinstance(iterable, string_types):
        return
    warnings.warn(
        "Response iterable was set to a string. This will appear to"
        " work but means that the server will send the data to the"
        " client one character at a time. This is almost never"
        " intended behavior, use 'response.data' to assign strings"
        " to the response object.",
        stacklevel=2,
    )
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
def _clean_accept_ranges(accept_ranges):
if accept_ranges is True:
return "bytes"
elif accept_ranges is False:
return "none"
elif isinstance(accept_ranges, text_type):
return to_native(accept_ranges)
raise ValueError("Invalid accept_ranges value")
class BaseResponse(object):
    """Base response class. The most important fact about a response object
    is that it's a regular WSGI application. It's initialized with a couple
    of response parameters (headers, body, status code etc.) and will start a
    valid WSGI response when called with the environ and start response
    callable.

    Because it's a WSGI application itself processing usually ends before the
    actual response is sent to the server. This helps debugging systems
    because they can catch all the exceptions before responses are started.

    Here a small example WSGI application that takes advantage of the
    response objects::

        from werkzeug.wrappers import BaseResponse as Response

        def index():
            return Response('Index page')

        def application(environ, start_response):
            path = environ.get('PATH_INFO') or '/'
            if path == '/':
                response = index()
            else:
                response = Response('Not Found', status=404)
            return response(environ, start_response)

    Like :class:`BaseRequest` which object is lacking a lot of functionality
    implemented in mixins. This gives you a better control about the actual
    API of your response objects, so you can create subclasses and add custom
    functionality. A full featured response object is available as
    :class:`Response` which implements a couple of useful mixins.

    To enforce a new type of already existing responses you can use the
    :meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post process them with a
    known interface.

    Per default the response object will assume all the text data is `utf-8`
    encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
    details about customizing the behavior.

    Response can be any kind of iterable or string. If it's a string it's
    considered being an iterable with one item which is the string passed.
    Headers can be a list of tuples or a
    :class:`~werkzeug.datastructures.Headers` object.

    Special note for `mimetype` and `content_type`: For most mime types
    `mimetype` and `content_type` work the same, the difference affects
    only 'text' mimetypes. If the mimetype passed with `mimetype` is a
    mimetype starting with `text/`, the charset parameter of the response
    object is appended to it. In contrast the `content_type` parameter is
    always added as header unmodified.

    .. versionchanged:: 0.5
       the `direct_passthrough` parameter was added.

    :param response: a string or response iterable.
    :param status: a string with a status or an integer with the status code.
    :param headers: a list of headers or a
                    :class:`~werkzeug.datastructures.Headers` object.
    :param mimetype: the mimetype for the response. See notice above.
    :param content_type: the content type for the response. See notice above.
    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
                               called before iteration which makes it
                               possible to pass special iterators through
                               unchanged (see :func:`wrap_file` for more
                               details.)
    """

    #: the charset of the response.
    charset = "utf-8"

    #: the default status if none is provided.
    default_status = 200

    #: the default mimetype if none is provided.
    default_mimetype = "text/plain"

    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True

    #: Should this response object correct the location header to be RFC
    #: conformant? This is true by default.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = True

    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True

    #: Warn if a cookie header exceeds this size. The default, 4093, should be
    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
    #: this size will still be sent, but it may be ignored or handled
    #: incorrectly by some browsers. Set to 0 to disable this check.
    #:
    #: .. versionadded:: 0.13
    #:
    #: .. _`cookie`: http://browsercookielimits.squawky.net/
    max_cookie_size = 4093
    def __init__(
        self,
        response=None,
        status=None,
        headers=None,
        mimetype=None,
        content_type=None,
        direct_passthrough=False,
    ):
        # Normalize ``headers`` into a Headers instance, reusing the
        # caller's object when one was passed directly.
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)
        # ``content_type`` wins over ``mimetype``; the default mimetype
        # only applies when neither was given and no Content-Type header
        # is already present.
        if content_type is None:
            if mimetype is None and "content-type" not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers["Content-Type"] = content_type
        if status is None:
            status = self.default_status
        # Integers go through the status_code setter, anything else
        # through the status (string) setter.
        if isinstance(status, integer_types):
            self.status_code = status
        else:
            self.status = status

        self.direct_passthrough = direct_passthrough
        # Callbacks registered via call_on_close(), run by close().
        self._on_close = []

        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, (text_type, bytes, bytearray)):
            self.set_data(response)
        else:
            self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = "%d bytes" % sum(map(len, self.iter_encoded()))
else:
body_info = "streamed" if self.is_streamed else "likely-streamed"
return "<%s %s [%s]>" % (self.__class__.__name__, body_info, self.status)
    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.

        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::

            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)

            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)

        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.

        Keep in mind that this will modify response objects in place if
        possible!

        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError(
                    "cannot convert WSGI application into response"
                    " objects without an environ"
                )
            # Run the WSGI callable to completion and build a plain
            # BaseResponse from its (app_iter, status, headers) result.
            response = BaseResponse(*_run_wsgi_app(response, environ))
        # Reassigning __class__ converts the object in place (no copy),
        # as promised in the docstring above.
        response.__class__ = cls
        return response

    @classmethod
    def from_app(cls, app, environ, buffered=False):
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.

        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        return cls(*_run_wsgi_app(app, environ, buffered))
    @property
    def status_code(self):
        """The HTTP status code as a number."""
        return self._status_code

    @status_code.setter
    def status_code(self, code):
        self._status_code = code
        try:
            # Keep the string form in sync, e.g. 404 -> "404 NOT FOUND".
            self._status = "%d %s" % (code, HTTP_STATUS_CODES[code].upper())
        except KeyError:
            # Codes without a registered reason phrase still produce a
            # syntactically valid status line.
            self._status = "%d UNKNOWN" % code

    @property
    def status(self):
        """The HTTP status code as a string."""
        return self._status

    @status.setter
    def status(self, value):
        try:
            self._status = to_native(value)
        except AttributeError:
            # to_native failed: the value was not string-like.
            raise TypeError("Invalid status argument")

        try:
            # Keep the numeric form in sync with the first token of the
            # status line.
            self._status_code = int(self._status.split(None, 1)[0])
        except ValueError:
            # First token is not an integer: record code 0 and prefix
            # the original text so the status line stays well formed.
            self._status_code = 0
            self._status = "0 %s" % self._status
        except IndexError:
            # split() produced nothing, i.e. the string was empty.
            raise ValueError("Empty status argument")
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b"".join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must be either a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
# if an unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers["Content-Length"] = str(len(value))
data = property(
get_data,
set_data,
doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
)
    def calculate_content_length(self):
        """Returns the content length if available or `None` otherwise."""
        try:
            self._ensure_sequence()
        except RuntimeError:
            # Streamed / direct-passthrough responses cannot be measured
            # without consuming them.
            return None
        return sum(len(x) for x in self.iter_encoded())

    def _ensure_sequence(self, mutable=False):
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.

        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)
            return
        if self.direct_passthrough:
            raise RuntimeError(
                "Attempted implicit sequence conversion but the"
                " response object is in direct passthrough mode."
            )
        if not self.implicit_sequence_conversion:
            raise RuntimeError(
                "The response object required the iterable to be a"
                " sequence, but the implicit conversion was disabled."
                " Call make_sequence() yourself."
            )
        self.make_sequence()

    def make_sequence(self):
        """Converts the response iterator in a list. By default this happens
        automatically if required. If `implicit_sequence_conversion` is
        disabled, this method is not automatically called and some properties
        might raise exceptions. This also encodes all the items.

        .. versionadded:: 0.6
        """
        if not self.is_sequence:
            # if we consume an iterable we have to ensure that the close
            # method of the iterable is called if available when we tear
            # down the response
            close = getattr(self.response, "close", None)
            self.response = list(self.iter_encoded())
            if close is not None:
                self.call_on_close(close)

    def iter_encoded(self):
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        """
        if __debug__:
            _warn_if_string(self.response)
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        return _iter_encoded(self.response, self.charset)
    def set_cookie(
        self,
        key,
        value="",
        max_age=None,
        expires=None,
        path="/",
        domain=None,
        secure=False,
        httponly=False,
        samesite=None,
    ):
        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
        object in the Python standard library but it accepts unicode data, too.

        A warning is raised if the size of the cookie header exceeds
        :attr:`max_cookie_size`, but the header will still be set.

        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param secure: If `True`, the cookie will only be available via HTTPS
        :param httponly: disallow JavaScript to access the cookie. This is an
                         extension to the cookie standard and probably not
                         supported by all browsers.
        :param samesite: Limits the scope of the cookie such that it will only
                         be attached to requests if those requests are
                         "same-site".
        """
        # dump_cookie handles encoding, attribute serialization and the
        # oversize-cookie check (via max_size) in one place; the result
        # is appended so multiple Set-Cookie headers can coexist.
        self.headers.add(
            "Set-Cookie",
            dump_cookie(
                key,
                value=value,
                max_age=max_age,
                expires=expires,
                path=path,
                domain=domain,
                secure=secure,
                httponly=httponly,
                charset=self.charset,
                max_size=self.max_cookie_size,
                samesite=samesite,
            ),
        )
def delete_cookie(self, key, path="/", domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
a length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, "close"):
self.response.close()
for func in self._on_close:
func()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
# we explicitly set the length to a list of the *encoded* response
# iterator. Even if the implicit sequence conversion is disabled.
self.response = list(self.iter_encoded())
self.headers["Content-Length"] = str(sum(map(len, self.response)))
    def get_wsgi_headers(self, environ):
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.

        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.

        .. versionchanged:: 0.6
           Previously that function was called `fix_headers` and modified
           the response object in place. Also since 0.6, IRIs in location
           and content-location headers are handled properly.

           Also starting with 0.6, Werkzeug will attempt to set the content
           length if it is able to figure it out on its own. This is the
           case if all the strings in the response iterable are already
           encoded and the iterable is buffered.

        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        # Work on a copy; self.headers is never modified here.
        headers = Headers(self.headers)
        location = None
        content_location = None
        content_length = None
        status = self.status_code

        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used each response that gives us a tiny
        # speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == u"location":
                location = value
            elif ikey == u"content-location":
                content_location = value
            elif ikey == u"content-length":
                content_length = value

        # make sure the location header is an absolute URL
        if location is not None:
            old_location = location
            if isinstance(location, text_type):
                # Safe conversion is necessary here as we might redirect
                # to a broken URI scheme (for instance itms-services).
                location = iri_to_uri(location, safe_conversion=True)

            if self.autocorrect_location_header:
                current_url = get_current_url(environ, strip_querystring=True)
                if isinstance(current_url, text_type):
                    current_url = iri_to_uri(current_url)
                location = url_join(current_url, location)

            # Only write back when something actually changed.
            if location != old_location:
                headers["Location"] = location

        # make sure the content location is a URL
        if content_location is not None and isinstance(content_location, text_type):
            headers["Content-Location"] = iri_to_uri(content_location)

        if 100 <= status < 200 or status == 204:
            # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
            # Content-Length header field in any response with a status
            # code of 1xx (Informational) or 204 (No Content)."
            headers.remove("Content-Length")
        elif status == 304:
            remove_entity_headers(headers)

        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of unicode strings in
        # the response. We however should not do that if we have a 304
        # response.
        if (
            self.automatically_set_content_length
            and self.is_sequence
            and content_length is None
            and status not in (204, 304)
            and not (100 <= status < 200)
        ):
            try:
                content_length = sum(len(to_bytes(x, "ascii")) for x in self.response)
            except UnicodeError:
                # aha, something non-bytestringy in there, too bad, we
                # can't safely figure out the length of the response.
                pass
            else:
                headers["Content-Length"] = str(content_length)

        return headers
    def get_app_iter(self, environ):
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.

        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.

        .. versionadded:: 0.6

        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        if (
            environ["REQUEST_METHOD"] == "HEAD"
            or 100 <= status < 200
            or status in (204, 304)
        ):
            # Bodyless responses: HEAD requests and 1xx/204/304 statuses.
            iterable = ()
        elif self.direct_passthrough:
            if __debug__:
                _warn_if_string(self.response)
            # Passthrough mode: hand the raw iterable to the server
            # without encoding and without the closing wrapper below.
            return self.response
        else:
            iterable = self.iter_encoded()
        # ClosingIterator ensures self.close() runs once the server has
        # finished iterating the body.
        return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/request.py
|
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .base_request import BaseRequest
from .common_descriptors import CommonRequestDescriptorsMixin
from .cors import CORSRequestMixin
from .etag import ETagRequestMixin
from .user_agent import UserAgentMixin
class Request(
    BaseRequest,
    AcceptMixin,
    ETagRequestMixin,
    UserAgentMixin,
    AuthorizationMixin,
    CORSRequestMixin,
    CommonRequestDescriptorsMixin,
):
    """Full featured request object implementing the following mixins:

    - :class:`AcceptMixin` for accept header parsing
    - :class:`ETagRequestMixin` for etag and cache control handling
    - :class:`UserAgentMixin` for user agent introspection
    - :class:`AuthorizationMixin` for http auth handling
    - :class:`~werkzeug.wrappers.cors.CORSRequestMixin` for Cross
      Origin Resource Sharing headers
    - :class:`CommonRequestDescriptorsMixin` for common headers
    """

    # BaseRequest is listed first, so its attributes take precedence
    # over the mixins in the method resolution order.
class StreamOnlyMixin(object):
    """If mixed in before the request object this will change the behavior
    of it to disable handling of form parsing. This disables the
    :attr:`files`, :attr:`form` attributes and will just provide a
    :attr:`stream` attribute that however is always available.

    .. versionadded:: 0.9
    """

    # Flags read by the request machinery in base_request (defined
    # elsewhere in this package) -- presumably these switch off the
    # data descriptor and form-data parsing; verify against BaseRequest.
    disable_data_descriptor = True
    want_form_data_parsed = False


class PlainRequest(StreamOnlyMixin, Request):
    """A request object without special form parsing capabilities.

    .. versionadded:: 0.9
    """
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/__init__.py
|
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
decoded into an unicode object if possible and if it makes sense.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .auth import WWWAuthenticateMixin
from .base_request import BaseRequest
from .base_response import BaseResponse
from .common_descriptors import CommonRequestDescriptorsMixin
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagRequestMixin
from .etag import ETagResponseMixin
from .request import PlainRequest
from .request import Request
from .request import StreamOnlyMixin
from .response import Response
from .response import ResponseStream
from .response import ResponseStreamMixin
from .user_agent import UserAgentMixin
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/response.py
|
from ..utils import cached_property
from .auth import WWWAuthenticateMixin
from .base_response import BaseResponse
from .common_descriptors import CommonResponseDescriptorsMixin
from .cors import CORSResponseMixin
from .etag import ETagResponseMixin
class ResponseStream(object):
    """A write-only, file-descriptor-like object used by
    :class:`ResponseStreamMixin` to represent the body of the stream.
    Writes are pushed directly onto the response iterable of the wrapped
    response object.
    """

    mode = "wb+"

    def __init__(self, response):
        self.response = response
        self.closed = False

    def _assert_open(self):
        # Mirror file-object semantics: operations on a closed stream
        # raise ValueError.
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def write(self, value):
        """Append *value* to the response body and return its length."""
        self._assert_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)
        # Any cached Content-Length is now stale.
        self.response.headers.pop("Content-Length", None)
        return len(value)

    def writelines(self, seq):
        """Write every item of *seq* in order."""
        for chunk in seq:
            self.write(chunk)

    def close(self):
        self.closed = True

    def flush(self):
        # Nothing is buffered; only validate the stream is still open.
        self._assert_open()

    def isatty(self):
        self._assert_open()
        return False

    def tell(self):
        """Current position, i.e. the total length written so far."""
        self.response._ensure_sequence()
        return sum(len(item) for item in self.response.response)

    @property
    def encoding(self):
        return self.response.charset
class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses that adds a
    :attr:`stream` property providing a write-only, file-like interface
    to the response iterable.
    """

    @cached_property
    def stream(self):
        """Write-only stream view of this response (created lazily)."""
        return ResponseStream(self)
class Response(
    BaseResponse,
    ETagResponseMixin,
    WWWAuthenticateMixin,
    CORSResponseMixin,
    ResponseStreamMixin,
    CommonResponseDescriptorsMixin,
):
    """Full featured response object implementing the following mixins:

    - :class:`ETagResponseMixin` for etag and cache control handling
    - :class:`WWWAuthenticateMixin` for HTTP authentication support
    - :class:`~werkzeug.wrappers.cors.CORSResponseMixin` for Cross
      Origin Resource Sharing headers
    - :class:`ResponseStreamMixin` to add support for the ``stream``
      property
    - :class:`CommonResponseDescriptorsMixin` for various HTTP
      descriptors
    """

    # BaseResponse is listed first, so its attributes take precedence
    # over the mixins in the method resolution order.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/etag.py
|
from .._compat import string_types
from .._internal import _get_environ
from ..datastructures import ContentRange
from ..datastructures import RequestCacheControl
from ..datastructures import ResponseCacheControl
from ..http import generate_etag
from ..http import http_date
from ..http import is_resource_modified
from ..http import parse_cache_control_header
from ..http import parse_content_range_header
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_range_header
from ..http import quote_etag
from ..http import unquote_etag
from ..utils import cached_property
from ..utils import header_property
from ..wrappers.base_response import _clean_accept_ranges
from ..wsgi import _RangeWrapper
class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object
    with a WSGI environment available as :attr:`~BaseRequest.environ`.
    This provides access both to etags and to the cache control header.
    """

    @cached_property
    def cache_control(self):
        """The incoming cache control headers as a
        :class:`~werkzeug.datastructures.RequestCacheControl` object.
        """
        raw = self.environ.get("HTTP_CACHE_CONTROL")
        return parse_cache_control_header(raw, None, RequestCacheControl)

    @cached_property
    def if_match(self):
        """All etags contained in the `If-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        raw = self.environ.get("HTTP_IF_MATCH")
        return parse_etags(raw)

    @cached_property
    def if_none_match(self):
        """All etags contained in the `If-None-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        raw = self.environ.get("HTTP_IF_NONE_MATCH")
        return parse_etags(raw)

    @cached_property
    def if_modified_since(self):
        """The `If-Modified-Since` header parsed into a datetime object."""
        raw = self.environ.get("HTTP_IF_MODIFIED_SINCE")
        return parse_date(raw)

    @cached_property
    def if_unmodified_since(self):
        """The `If-Unmodified-Since` header parsed into a datetime object."""
        raw = self.environ.get("HTTP_IF_UNMODIFIED_SINCE")
        return parse_date(raw)

    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        raw = self.environ.get("HTTP_IF_RANGE")
        return parse_if_range_header(raw)

    @cached_property
    def range(self):
        """The parsed `Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        raw = self.environ.get("HTTP_RANGE")
        return parse_range_header(raw)
class ETagResponseMixin(object):
    """Adds extra functionality to a response object for etag and cache
    handling.  This mixin requires an object with at least a `headers`
    object that implements a dict like interface similar to
    :class:`~werkzeug.datastructures.Headers`.

    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mixin this method before the response base class.  The default
    response class does not do that.
    """

    @property
    def cache_control(self):
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """

        def on_update(cache_control):
            # keep the header in sync with mutations of the returned object
            if not cache_control and "cache-control" in self.headers:
                del self.headers["cache-control"]
            elif cache_control:
                self.headers["Cache-Control"] = cache_control.to_header()

        return parse_cache_control_header(
            self.headers.get("cache-control"), on_update, ResponseCacheControl
        )

    def _wrap_response(self, start, length):
        """Wrap existing Response in case of Range Request context."""
        if self.status_code == 206:
            self.response = _RangeWrapper(self.response, start, length)

    def _is_range_request_processable(self, environ):
        """Return ``True`` if `Range` header is present and if underlying
        resource is considered unchanged when compared with `If-Range` header.
        """
        return (
            "HTTP_IF_RANGE" not in environ
            or not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
                ignore_if_range=False,
            )
        ) and "HTTP_RANGE" in environ

    def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
        """Handle Range Request related headers (RFC7233).  If `Accept-Ranges`
        header is valid, and Range Request is processable, we set the headers
        as described by the RFC, and wrap the underlying response in a
        RangeWrapper.

        Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.

        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
                 if `Range` header could not be parsed or satisfied.
        """
        # local import to avoid a circular import with werkzeug.exceptions
        from ..exceptions import RequestedRangeNotSatisfiable

        if (
            accept_ranges is None
            or complete_length is None
            or not self._is_range_request_processable(environ)
        ):
            # not a (processable) range request; serve the full response
            return False
        parsed_range = parse_range_header(environ.get("HTTP_RANGE"))
        if parsed_range is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        range_tuple = parsed_range.range_for_length(complete_length)
        content_range_header = parsed_range.to_content_range_header(complete_length)
        if range_tuple is None or content_range_header is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        content_length = range_tuple[1] - range_tuple[0]
        self.headers["Content-Length"] = content_length
        self.headers["Accept-Ranges"] = accept_ranges
        self.content_range = content_range_header
        self.status_code = 206
        self._wrap_response(range_tuple[0], content_length)
        return True

    def make_conditional(
        self, request_or_environ, accept_ranges=False, complete_length=None
    ):
        """Make the response conditional to the request.  This method works
        best if an etag was defined for the response already.  The `add_etag`
        method can be used to do that.  If called without etag just the date
        header is set.

        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.

        For optimal performance when handling range requests, it's recommended
        that your response data object implements `seekable`, `seek` and `tell`
        methods as described by :py:class:`io.IOBase`.  Objects returned by
        :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.

        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.

        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.

        :param request_or_environ: a request object or WSGI environment to be
                                   used to make the response conditional
                                   against.
        :param accept_ranges: This parameter dictates the value of
                              `Accept-Ranges` header. If ``False`` (default),
                              the header is not set. If ``True``, it will be set
                              to ``"bytes"``. If ``None``, it will be set to
                              ``"none"``. If it's a string, it will use this
                              value.
        :param complete_length: Will be used only in valid Range Requests.
                                It will set `Content-Range` complete length
                                value and compute `Content-Length` real value.
                                This parameter is mandatory for successful
                                Range Requests completion.
        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
                 if `Range` header could not be parsed or satisfied.
        """
        environ = _get_environ(request_or_environ)
        if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
            # if the date is not in the headers, add it now.  We however
            # will not override an already existing header.  Unfortunately
            # this header will be overridden by many WSGI servers including
            # wsgiref.
            if "date" not in self.headers:
                self.headers["Date"] = http_date()
            accept_ranges = _clean_accept_ranges(accept_ranges)
            is206 = self._process_range_request(environ, complete_length, accept_ranges)
            if not is206 and not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
            ):
                # resource unchanged: 412 for failed If-Match preconditions,
                # 304 Not Modified otherwise
                if parse_etags(environ.get("HTTP_IF_MATCH")):
                    self.status_code = 412
                else:
                    self.status_code = 304
            if (
                self.automatically_set_content_length
                and "content-length" not in self.headers
            ):
                length = self.calculate_content_length()
                if length is not None:
                    self.headers["Content-Length"] = length
        return self

    def add_etag(self, overwrite=False, weak=False):
        """Add an etag for the current response if there is none yet."""
        if overwrite or "etag" not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)

    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one."""
        self.headers["ETag"] = quote_etag(etag, weak)

    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``.  If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get("ETag"))

    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready for
        pickling.  This buffers the generator if there is one.  This also
        sets the etag unless `no_etag` is set to `True`.
        """
        if not no_etag:
            self.add_etag()
        super(ETagResponseMixin, self).freeze()

    accept_ranges = header_property(
        "Accept-Ranges",
        doc="""The `Accept-Ranges` header.  Even though the name would
        indicate that multiple values are supported, it must be one
        string token only.

        The values ``'bytes'`` and ``'none'`` are common.

        .. versionadded:: 0.7""",
    )

    @property
    def content_range(self):
        """The ``Content-Range`` header as a
        :class:`~werkzeug.datastructures.ContentRange` object.  Available
        even if the header is not set.

        .. versionadded:: 0.7
        """

        def on_update(rng):
            if not rng:
                del self.headers["content-range"]
            else:
                self.headers["Content-Range"] = rng.to_header()

        rv = parse_content_range_header(self.headers.get("content-range"), on_update)
        # always provide a content range object to make the descriptor
        # more user friendly.  It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    @content_range.setter
    def content_range(self, value):
        if not value:
            del self.headers["content-range"]
        elif isinstance(value, string_types):
            # raw header value given; store as-is
            self.headers["Content-Range"] = value
        else:
            self.headers["Content-Range"] = value.to_header()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/common_descriptors.py
|
from datetime import datetime
from datetime import timedelta
from .._compat import string_types
from ..datastructures import CallbackDict
from ..http import dump_age
from ..http import dump_csp_header
from ..http import dump_header
from ..http import dump_options_header
from ..http import http_date
from ..http import parse_age
from ..http import parse_csp_header
from ..http import parse_date
from ..http import parse_options_header
from ..http import parse_set_header
from ..utils import cached_property
from ..utils import environ_property
from ..utils import get_content_type
from ..utils import header_property
from ..wsgi import get_content_length
class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses.  Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.

    .. versionadded:: 0.5
    """

    content_type = environ_property(
        "CONTENT_TYPE",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )

    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return get_content_length(self.environ)

    content_encoding = environ_property(
        "HTTP_CONTENT_ENCODING",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type.  When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.

        .. versionadded:: 0.9""",
    )
    content_md5 = environ_property(
        "HTTP_CONTENT_MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body.  (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)

        .. versionadded:: 0.9""",
    )
    referrer = environ_property(
        "HTTP_REFERER",
        doc="""The Referer[sic] request-header field allows the client
        to specify, for the server's benefit, the address (URI) of the
        resource from which the Request-URI was obtained (the
        "referrer", although the header field is misspelled).""",
    )
    date = environ_property(
        "HTTP_DATE",
        None,
        parse_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.""",
    )
    max_forwards = environ_property(
        "HTTP_MAX_FORWARDS",
        None,
        int,
        doc="""The Max-Forwards request-header field provides a
        mechanism with the TRACE and OPTIONS methods to limit the number
        of proxies or gateways that can forward the request to the next
        inbound server.""",
    )

    def _parse_content_type(self):
        # parse the Content-Type header once and memoize the result on the
        # instance; both mimetype and mimetype_params read from this cache
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = parse_options_header(
                self.environ.get("CONTENT_TYPE", "")
            )

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase.  For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain.  All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get("HTTP_PRAGMA", ""))
class CommonResponseDescriptorsMixin(object):
    """A mixin for :class:`BaseResponse` subclasses.  Response objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    """

    @property
    def mimetype(self):
        """The mimetype (content type without charset etc.)"""
        ct = self.headers.get("content-type")
        if ct:
            return ct.split(";")[0].strip()

    @mimetype.setter
    def mimetype(self, value):
        self.headers["Content-Type"] = get_content_type(value, self.charset)

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.5
        """

        def on_update(d):
            # re-serialize the whole header whenever the dict is mutated
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)

    location = header_property(
        "Location",
        doc="""The Location response-header field is used to redirect
        the recipient to a location other than the Request-URI for
        completion of the request or identification of a new
        resource.""",
    )
    age = header_property(
        "Age",
        None,
        parse_age,
        dump_age,
        doc="""The Age response-header field conveys the sender's
        estimate of the amount of time since the response (or its
        revalidation) was generated at the origin server.

        Age values are non-negative decimal integers, representing time
        in seconds.""",
    )
    content_type = header_property(
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )
    content_length = header_property(
        "Content-Length",
        None,
        int,
        str,
        doc="""The Content-Length entity-header field indicates the size
        of the entity-body, in decimal number of OCTETs, sent to the
        recipient or, in the case of the HEAD method, the size of the
        entity-body that would have been sent had the request been a
        GET.""",
    )
    content_location = header_property(
        "Content-Location",
        doc="""The Content-Location entity-header field MAY be used to
        supply the resource location for the entity enclosed in the
        message when that entity is accessible from a location separate
        from the requested resource's URI.""",
    )
    content_encoding = header_property(
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type.  When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.""",
    )
    content_md5 = header_property(
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body.  (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)""",
    )
    content_security_policy = header_property(
        "Content-Security-Policy",
        None,
        parse_csp_header,
        dump_csp_header,
        doc="""The Content-Security-Policy header adds an additional layer of
        security to help detect and mitigate certain types of attacks.""",
    )
    content_security_policy_report_only = header_property(
        "Content-Security-Policy-Report-Only",
        None,
        parse_csp_header,
        dump_csp_header,
        doc="""The Content-Security-Policy-Report-Only header adds a csp policy
        that is not enforced but is reported thereby helping detect
        certain types of attacks.""",
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        http_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.""",
    )
    expires = header_property(
        "Expires",
        None,
        parse_date,
        http_date,
        doc="""The Expires entity-header field gives the date/time after
        which the response is considered stale.  A stale cache entry may
        not normally be returned by a cache.""",
    )
    last_modified = header_property(
        "Last-Modified",
        None,
        parse_date,
        http_date,
        doc="""The Last-Modified entity-header field indicates the date
        and time at which the origin server believes the variant was
        last modified.""",
    )

    @property
    def retry_after(self):
        """The Retry-After response-header field can be used with a
        503 (Service Unavailable) response to indicate how long the
        service is expected to be unavailable to the requesting client.

        Time in seconds until expiration or date.
        """
        value = self.headers.get("retry-after")
        if value is None:
            return
        elif value.isdigit():
            # header held a delta in seconds; convert to an absolute time
            return datetime.utcnow() + timedelta(seconds=int(value))
        return parse_date(value)

    @retry_after.setter
    def retry_after(self, value):
        if value is None:
            if "retry-after" in self.headers:
                del self.headers["retry-after"]
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers["Retry-After"] = value

    def _set_property(name, doc=None):  # noqa: B902
        # factory for header-set descriptors (Vary, Content-Language, Allow);
        # `name` plays the role of `self` here, hence the noqa
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self.headers:
                    del self.headers[name]
                elif header_set:
                    self.headers[name] = header_set.to_header()

            return parse_set_header(self.headers.get(name), on_update)

        def fset(self, value):
            if not value:
                del self.headers[name]
            elif isinstance(value, string_types):
                self.headers[name] = value
            else:
                self.headers[name] = dump_header(value)

        return property(fget, fset, doc=doc)

    vary = _set_property(
        "Vary",
        doc="""The Vary field value indicates the set of request-header
        fields that fully determines, while the response is fresh,
        whether a cache is permitted to use the response to reply to a
        subsequent request without revalidation.""",
    )
    content_language = _set_property(
        "Content-Language",
        doc="""The Content-Language entity-header field describes the
        natural language(s) of the intended audience for the enclosed
        entity.  Note that this might not be equivalent to all the
        languages used within the entity-body.""",
    )
    allow = _set_property(
        "Allow",
        doc="""The Allow entity-header field lists the set of methods
        supported by the resource identified by the Request-URI.  The
        purpose of this field is strictly to inform the recipient of
        valid methods associated with the resource.  An Allow header
        field MUST be present in a 405 (Method Not Allowed)
        response.""",
    )

    # the factory is an implementation detail; don't leave it on the class
    del _set_property
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/accept.py
|
from ..datastructures import CharsetAccept
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..http import parse_accept_header
from ..utils import cached_property
class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseResponse.environ`
    attribute that exposes all HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).
    """

    @cached_property
    def accept_mimetypes(self):
        """The mimetypes this client supports as a
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        header = self.environ.get("HTTP_ACCEPT")
        return parse_accept_header(header, MIMEAccept)

    @cached_property
    def accept_charsets(self):
        """The charsets this client supports as a
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        header = self.environ.get("HTTP_ACCEPT_CHARSET")
        return parse_accept_header(header, CharsetAccept)

    @cached_property
    def accept_encodings(self):
        """The encodings this client accepts.  Encodings in a HTTP term
        are compression encodings such as gzip.  For charsets have a look
        at :attr:`accept_charset`.
        """
        header = self.environ.get("HTTP_ACCEPT_ENCODING")
        return parse_accept_header(header)

    @cached_property
    def accept_languages(self):
        """The languages this client accepts as a
        :class:`~werkzeug.datastructures.LanguageAccept` object.

        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        header = self.environ.get("HTTP_ACCEPT_LANGUAGE")
        return parse_accept_header(header, LanguageAccept)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/json.py
|
from __future__ import absolute_import
import datetime
import uuid
from .._compat import text_type
from ..exceptions import BadRequest
from ..utils import detect_utf_encoding
try:
import simplejson as _json
except ImportError:
import json as _json
class _JSONModule(object):
    """Thin wrapper around the selected JSON backend (``simplejson`` if
    installed, otherwise the stdlib :mod:`json`) that applies Werkzeug's
    preferred defaults.
    """

    @staticmethod
    def _default(o):
        # serialize a few common non-JSON types; anything else is an error
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, uuid.UUID):
            return str(o)
        if hasattr(o, "__html__"):
            return text_type(o.__html__())
        raise TypeError()

    @classmethod
    def dumps(cls, obj, **kw):
        # caller-supplied options win over these defaults
        for key, value in (
            ("separators", (",", ":")),
            ("default", cls._default),
            ("sort_keys", True),
        ):
            kw.setdefault(key, value)
        return _json.dumps(obj, **kw)

    @staticmethod
    def loads(s, **kw):
        if isinstance(s, bytes):
            # Python < 3.6 json.loads cannot handle bytes directly
            s = s.decode(detect_utf_encoding(s))
        return _json.loads(s, **kw)
class JSONMixin(object):
    """Mixin to parse :attr:`data` as JSON.  Can be mixed in for both
    :class:`~werkzeug.wrappers.Request` and
    :class:`~werkzeug.wrappers.Response` classes.

    If `simplejson`_ is installed it is preferred over Python's built-in
    :mod:`json` module.

    .. _simplejson: https://simplejson.readthedocs.io/en/latest/
    """

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = _JSONModule

    @property
    def json(self):
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :meth:`is_json`).

        Calls :meth:`get_json` with default arguments.
        """
        return self.get_json()

    @property
    def is_json(self):
        """Check if the mimetype indicates JSON data, either
        :mimetype:`application/json` or :mimetype:`application/*+json`.
        """
        mt = self.mimetype
        return (
            mt == "application/json"
            or mt.startswith("application/")
            and mt.endswith("+json")
        )

    def _get_data_for_json(self, cache):
        # request objects take a ``cache`` argument, response objects don't
        try:
            return self.get_data(cache=cache)
        except TypeError:
            # Response doesn't have cache param.
            return self.get_data()

    # Cached values for ``(silent=False, silent=True)``.  Initialized
    # with sentinel values.
    _cached_json = (Ellipsis, Ellipsis)

    def get_json(self, force=False, silent=False, cache=True):
        """Parse :attr:`data` as JSON.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :meth:`is_json`), this
        returns ``None``.

        If parsing fails, :meth:`on_json_loading_failed` is called and
        its return value is used as the return value.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence parsing errors and return ``None``
            instead.
        :param cache: Store the parsed JSON to return for subsequent
            calls.
        """
        # ``silent`` (a bool) indexes into the two-slot cache tuple
        if cache and self._cached_json[silent] is not Ellipsis:
            return self._cached_json[silent]
        if not (force or self.is_json):
            return None
        data = self._get_data_for_json(cache=cache)
        try:
            rv = self.json_module.loads(data)
        except ValueError as e:
            if silent:
                rv = None
                if cache:
                    # only update the silent slot; the non-silent slot keeps
                    # its previous value
                    normal_rv, _ = self._cached_json
                    self._cached_json = (normal_rv, rv)
            else:
                rv = self.on_json_loading_failed(e)
                if cache:
                    _, silent_rv = self._cached_json
                    self._cached_json = (rv, silent_rv)
        else:
            if cache:
                # successful parse is valid for both silent modes
                self._cached_json = (rv, rv)
        return rv

    def on_json_loading_failed(self, e):
        """Called if :meth:`get_json` parsing fails and isn't silenced.

        If this method returns a value, it is used as the return value
        for :meth:`get_json`.  The default implementation raises
        :exc:`~werkzeug.exceptions.BadRequest`.
        """
        raise BadRequest("Failed to decode JSON object: {0}".format(e))
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/wrappers/base_request.py
|
from functools import update_wrapper
from io import BytesIO
from .._compat import to_native
from .._compat import to_unicode
from .._compat import wsgi_decoding_dance
from .._compat import wsgi_get_bytes
from ..datastructures import CombinedMultiDict
from ..datastructures import EnvironHeaders
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import iter_multi_items
from ..datastructures import MultiDict
from ..formparser import default_stream_factory
from ..formparser import FormDataParser
from ..http import parse_cookie
from ..http import parse_list_header
from ..http import parse_options_header
from ..urls import url_decode
from ..utils import cached_property
from ..utils import environ_property
from ..wsgi import get_content_length
from ..wsgi import get_current_url
from ..wsgi import get_host
from ..wsgi import get_input_stream
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
Here an example for such subclasses::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
If `shallow` is `True` the environment is initialized as shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = "utf-8"

#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = "replace"

#: the maximum content length.  This is forwarded to the form data
#: parsing function (:func:`parse_form_data`).  When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None

#: the maximum form field size.  This is forwarded to the form data
#: parsing function (:func:`parse_form_data`).  When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None

#: the class to use for `args` and `form`.  The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key.  alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key.  It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict

#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList

#: The type to be used for dict values from the incoming WSGI
#: environment.  (For example for :attr:`cookies`.)  By default an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
#:
#: .. versionchanged:: 1.0.0
#:     Changed to ``ImmutableMultiDict`` to support multiple values.
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableMultiDict

#: The form data parser that should be used.  Can be replaced to customize
#: the form data parsing.
form_data_parser_class = FormDataParser

#: Optionally a list of hosts that is trusted by this request.  By default
#: all hosts are trusted which means that whatever the client sends the
#: host is will be accepted.
#:
#: Because `Host` and `X-Forwarded-Host` headers can be set to any value by
#: a malicious client, it is recommended to either set this property or
#: implement similar validation in the proxy (if application is being run
#: behind one).
#:
#: .. versionadded:: 0.9
trusted_hosts = None

#: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream.  By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ["werkzeug.request"] = self
self.shallow = shallow
def __repr__(self):
    """Debug representation that never raises, even for a request built
    from an invalid WSGI environment (important in debugger sessions)."""
    parts = []
    try:
        parts.append("'%s'" % to_native(self.url, self.url_charset))
        parts.append("[%s]" % self.method)
    except Exception:
        parts.append("(invalid WSGI environ)")
    return "<%s %s>" % (self.__class__.__name__, " ".join(parts))
@property
def url_charset(self):
    """Charset used when decoding URLs.  Defaults to :attr:`charset`.

    .. versionadded:: 0.6
    """
    return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
    """Create a new request object from the values provided.

    Missing environ values are filled in by
    :class:`~werkzeug.test.EnvironBuilder`, which accepts the same
    arguments.  Handy for small scripts that need to simulate a request
    from a URL; for unit testing prefer the full :class:`Client`.

    .. versionchanged:: 0.5
        Accepts the same arguments as
        :class:`~werkzeug.test.EnvironBuilder`; the `environ` parameter
        is now called `environ_overrides`.

    :return: request object
    """
    from ..test import EnvironBuilder

    # Default the builder's charset to the request class' charset.
    kwargs["charset"] = kwargs.pop("charset", cls.charset)
    builder = EnvironBuilder(*args, **kwargs)
    try:
        return builder.get_request(cls)
    finally:
        builder.close()
@classmethod
def application(cls, f):
    """Decorate a function as responder that accepts the request as
    the last argument.  This works like the :func:`responder`
    decorator but the function is passed the request object as the
    last argument and the request object will be closed
    automatically::

        @Request.application
        def my_wsgi_app(request):
            return Response('Hello World!')

    As of Werkzeug 0.14 HTTP exceptions are automatically caught and
    converted to responses instead of failing.

    :param f: the WSGI callable to decorate
    :return: a new WSGI callable
    """
    #: Return a callable that wraps the -2nd argument with the request
    #: and calls the function with all the arguments up to that one and
    #: the request.  The return value is then called with the latest
    #: two arguments.  This makes it possible to use this decorator for
    #: both standalone WSGI functions as well as bound methods and
    #: partially applied functions.
    from ..exceptions import HTTPException

    def application(*args):
        # args[-2:] is always (environ, start_response); anything before
        # that (e.g. a bound ``self``) is forwarded unchanged to ``f``.
        request = cls(args[-2])
        with request:
            try:
                resp = f(*args[:-2] + (request,))
            except HTTPException as e:
                # Render HTTP exceptions as regular responses (>= 0.14).
                resp = e.get_response(args[-2])
            return resp(*args[-2:])

    return update_wrapper(application, f)
def _get_file_stream(
    self, total_content_length, content_type, filename=None, content_length=None
):
    """Called to get a stream for the file upload.

    This must provide a file-like class with `read()`, `readline()`
    and `seek()` methods that is both writeable and readable.

    The default implementation returns a temporary file if the total
    content length is higher than 500KB.  Because many browsers do not
    provide a content length for the files only the total content
    length matters.

    :param total_content_length: the total content length of all the
                                 data in the request combined.  This value
                                 is guaranteed to be there.
    :param content_type: the mimetype of the uploaded file.
    :param filename: the filename of the uploaded file.  May be `None`.
    :param content_length: the length of this file.  This value is usually
                           not provided because webbrowsers do not provide
                           this value.
    """
    # Delegate to the module-level factory, which chooses between an
    # in-memory buffer and a temporary file based on the total size.
    return default_stream_factory(
        total_content_length=total_content_length,
        filename=filename,
        content_type=content_type,
        content_length=content_length,
    )
@property
def want_form_data_parsed(self):
    """``True`` if the request carries content that should be parsed.

    As of Werkzeug 0.9 this is the case whenever a ``Content-Type``
    header was transmitted.

    .. versionadded:: 0.8
    """
    content_type = self.environ.get("CONTENT_TYPE")
    return bool(content_type)
def make_form_data_parser(self):
    """Instantiate :attr:`form_data_parser_class` with this request's
    parsing configuration.

    .. versionadded:: 0.8
    """
    parser_cls = self.form_data_parser_class
    return parser_cls(
        self._get_file_stream,
        self.charset,
        self.encoding_errors,
        self.max_form_memory_size,
        self.max_content_length,
        self.parameter_storage_class,
    )
def _load_form_data(self):
    """Method used internally to retrieve submitted data.  After calling
    this sets `form` and `files` on the request object to multi dicts
    filled with the incoming form data.  As a matter of fact the input
    stream will be empty afterwards.  You can also call this method to
    force the parsing of the form data.

    .. versionadded:: 0.8
    """
    # abort early if we have already consumed the stream
    if "form" in self.__dict__:
        return
    _assert_not_shallow(self)
    if self.want_form_data_parsed:
        content_type = self.environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(self.environ)
        mimetype, options = parse_options_header(content_type)
        parser = self.make_form_data_parser()
        data = parser.parse(
            self._get_stream_for_parsing(), mimetype, content_length, options
        )
    else:
        # No content type transmitted: nothing to parse.  Expose the raw
        # stream and empty containers of the configured storage class.
        data = (
            self.stream,
            self.parameter_storage_class(),
            self.parameter_storage_class(),
        )
    # inject the values into the instance dict so that we bypass
    # our cached_property non-data descriptor.
    d = self.__dict__
    d["stream"], d["form"], d["files"] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, "_cached_data", None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self):
    """Close resources associated with this request, in particular any
    uploaded file handles.  The request can also be used in a ``with``
    statement, which closes it automatically.

    .. versionadded:: 0.9
    """
    uploaded = self.__dict__.get("files")
    if uploaded:
        for _name, storage in iter_multi_items(uploaded):
            storage.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
@cached_property
def stream(self):
    """The guarded stream holding the raw request body.

    If the incoming form data was not encoded with a known mimetype the
    unmodified data can be consumed from here; :attr:`data` yields the
    same bytes as a string.  The stream returns its data only once.

    Unlike :attr:`input_stream` this stream is guarded so you cannot
    accidentally read past the declared length, and Werkzeug always reads
    the body through it — so wrapping it with a filtering stream works.

    .. versionchanged:: 0.9
        Always available, but may be consumed by the form parser later
        on.  Previously the stream was only set if no parsing happened.
    """
    _assert_not_shallow(self)
    return get_input_stream(self.environ)
#: The raw, unguarded ``wsgi.input`` stream.  Prefer :attr:`stream`.
input_stream = environ_property(
    "wsgi.input",
    """The WSGI input stream.
In general it's a bad idea to use this one because you can
easily read past the boundary. Use the :attr:`stream`
instead.""",
)
@cached_property
def args(self):
    """The parsed URL query parameters (the part after the question
    mark), as an instance of :attr:`parameter_storage_class`
    (:class:`~werkzeug.datastructures.ImmutableMultiDict` by default).
    Change :attr:`parameter_storage_class` if e.g. ordering matters.
    """
    raw = wsgi_get_bytes(self.environ.get("QUERY_STRING", ""))
    return url_decode(
        raw,
        self.url_charset,
        errors=self.encoding_errors,
        cls=self.parameter_storage_class,
    )
@cached_property
def data(self):
    """The request body as bytes when it came with a mimetype Werkzeug
    does not handle.  Prefer :meth:`get_data` in new code.
    """
    if self.disable_data_descriptor:
        raise AttributeError("data descriptor is disabled")
    # XXX: this should eventually be deprecated.
    # Trigger form parsing first so this descriptor does not cache the
    # bytes that would otherwise become .form/.files — this restores the
    # pre-0.9 behavior.  New code should call get_data() explicitly.
    return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
    """Read the buffered incoming data from the client as one bytestring.

    Check the content length before calling this — a client could send
    dozens of megabytes and exhaust server memory.

    The result is cached by default; pass ``cache=False`` to disable
    that.  If the form data was already parsed this method returns an
    empty string, because form parsing does not cache the body; set
    ``parse_form_data=True`` to invoke form parsing first (with caching
    enabled — the default — the form parser will use the cached data
    anyway).

    :param cache: keep the read bytes on the request for later calls.
    :param as_text: decode the bytes with :attr:`charset` and return
        a unicode string.
    :param parse_form_data: run form-data parsing before reading.

    .. versionadded:: 0.9
    """
    body = getattr(self, "_cached_data", None)
    if body is None:
        if parse_form_data:
            self._load_form_data()
        body = self.stream.read()
        if cache:
            self._cached_data = body
    if as_text:
        body = body.decode(self.charset, self.encoding_errors)
    return body
@cached_property
def form(self):
    """The parsed form parameters as an instance of
    :attr:`parameter_storage_class`
    (:class:`~werkzeug.datastructures.ImmutableMultiDict` by default).

    File uploads do not end up here; see :attr:`files` instead.

    .. versionchanged:: 0.9
        Before Werkzeug 0.9 this was only populated for POST and PUT
        requests.
    """
    # _load_form_data injects "form" into __dict__, so this attribute
    # access returns the parsed data rather than recursing.
    self._load_form_data()
    return self.form
@cached_property
def values(self):
    """A :class:`werkzeug.datastructures.CombinedMultiDict` combining
    :attr:`args` and :attr:`form`."""
    sources = []
    for source in (self.args, self.form):
        if not isinstance(source, MultiDict):
            source = MultiDict(source)
        sources.append(source)
    return CombinedMultiDict(sources)
@cached_property
def files(self):
    """:class:`~werkzeug.datastructures.MultiDict` of uploaded files.

    Keys are the ``name`` of each ``<input type="file" name="">`` field;
    values are :class:`~werkzeug.datastructures.FileStorage` objects,
    which behave like standard file objects but also provide a
    :meth:`~werkzeug.datastructures.FileStorage.save` method to store
    the upload on the filesystem.

    Only populated when the request method was POST, PUT or PATCH and
    the posting ``<form>`` used ``enctype="multipart/form-data"``;
    otherwise it is empty.  See the
    :class:`~werkzeug.datastructures.MultiDict` /
    :class:`~werkzeug.datastructures.FileStorage` documentation for
    details on the data structure.
    """
    # _load_form_data injects "files" into __dict__, avoiding recursion.
    self._load_form_data()
    return self.files
@cached_property
def cookies(self):
    """All cookies transmitted with the request, as an instance of
    :attr:`dict_storage_class`."""
    parsed = parse_cookie(
        self.environ,
        self.charset,
        self.encoding_errors,
        cls=self.dict_storage_class,
    )
    return parsed
@cached_property
def headers(self):
    """The request headers as an immutable
    :class:`~werkzeug.datastructures.EnvironHeaders` view over the WSGI
    environ."""
    return EnvironHeaders(self.environ)
@cached_property
def path(self):
    """Requested path as unicode.  Works like ``PATH_INFO`` but always
    includes a leading slash, even when the URL root is accessed."""
    decoded = wsgi_decoding_dance(
        self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
    )
    return "/" + decoded.lstrip("/")
@cached_property
def full_path(self):
    """Requested path as unicode, including the query string."""
    query = to_unicode(self.query_string, self.url_charset)
    return self.path + u"?" + query
@cached_property
def script_root(self):
    """The root path of the script (``SCRIPT_NAME``) without a trailing
    slash."""
    decoded = wsgi_decoding_dance(
        self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
    )
    return decoded.rstrip("/")
@cached_property
def url(self):
    """The reconstructed current URL as IRI.

    See also: :attr:`trusted_hosts`.
    """
    return get_current_url(self.environ, trusted_hosts=self.trusted_hosts)
@cached_property
def base_url(self):
    """Like :attr:`url` but with the query string stripped.

    See also: :attr:`trusted_hosts`.
    """
    return get_current_url(
        self.environ,
        strip_querystring=True,
        trusted_hosts=self.trusted_hosts,
    )
@cached_property
def url_root(self):
    """The full URL root (with hostname) — the application root as IRI.

    See also: :attr:`trusted_hosts`.
    """
    # NOTE(review): the positional ``True`` selects get_current_url's
    # root-only mode — confirm against werkzeug.wsgi.get_current_url.
    return get_current_url(self.environ, True, trusted_hosts=self.trusted_hosts)
@cached_property
def host_url(self):
    """Just the scheme and host as IRI.

    See also: :attr:`trusted_hosts`.
    """
    return get_current_url(
        self.environ,
        host_only=True,
        trusted_hosts=self.trusted_hosts,
    )
@cached_property
def host(self):
    """The host, including the port when available.

    See also: :attr:`trusted_hosts`.
    """
    return get_host(self.environ, trusted_hosts=self.trusted_hosts)
#: The raw query string (everything after ``?``) as a bytestring.
query_string = environ_property(
    "QUERY_STRING",
    "",
    read_only=True,
    load_func=wsgi_get_bytes,
    doc="The URL parameters as raw bytestring.",
)
#: The request method, normalized to upper case by ``load_func``.
method = environ_property(
    "REQUEST_METHOD",
    "GET",
    read_only=True,
    load_func=lambda x: x.upper(),
    doc="The request method. (For example ``'GET'`` or ``'POST'``).",
)
@cached_property
def access_route(self):
    """List of IP addresses from the client through the last proxy
    server, taken from ``X-Forwarded-For`` when present and falling
    back to ``REMOTE_ADDR``."""
    environ = self.environ
    if "HTTP_X_FORWARDED_FOR" in environ:
        addresses = parse_list_header(environ["HTTP_X_FORWARDED_FOR"])
    elif "REMOTE_ADDR" in environ:
        addresses = [environ["REMOTE_ADDR"]]
    else:
        addresses = []
    return self.list_storage_class(addresses)
@property
def remote_addr(self):
    """The client's address, as reported by ``REMOTE_ADDR``."""
    return self.environ.get("REMOTE_ADDR")
#: Authenticated username for protected scripts, when the server
#: supports authentication.
remote_user = environ_property(
    "REMOTE_USER",
    doc="""If the server supports user authentication, and the
script is protected, this attribute contains the username the
user has authenticated as.""",
)
#: The URL scheme from ``wsgi.url_scheme``.
scheme = environ_property(
    "wsgi.url_scheme",
    doc="""
URL scheme (http or https).
.. versionadded:: 0.7""",
)
#: Derived from the scheme rather than stored in the environ directly.
is_secure = property(
    lambda self: self.environ["wsgi.url_scheme"] == "https",
    doc="`True` if the request is secure.",
)
is_multithread = environ_property(
    "wsgi.multithread",
    doc="""boolean that is `True` if the application is served by a
multithreaded WSGI server.""",
)
is_multiprocess = environ_property(
    "wsgi.multiprocess",
    doc="""boolean that is `True` if the application is served by a
WSGI server that spawns multiple processes.""",
)
is_run_once = environ_property(
    "wsgi.run_once",
    doc="""boolean that is `True` if the application will be
executed only once in a process lifetime. This is the case for
CGI for example, but it's not guaranteed that the execution only
happens one time.""",
)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError(
"A shallow request tried to consume form data. If you really"
" want to do that, set `shallow` to False."
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/console.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import code
import sys
from types import CodeType
from ..local import Local
from ..utils import escape
from .repr import debug_repr
from .repr import dump
from .repr import helper
# Context-local storage for the console: holds the per-evaluation output
# stream (``_local.stream``) and the active interpreter
# (``_local._current_ipy``) — see ThreadedStream below.
_local = Local()
class HTMLStringO(object):
    """An in-memory text stream that HTML-escapes everything written
    through the public ``write``/``writelines`` API."""

    def __init__(self):
        self._buffer = []

    def isatty(self):
        return False

    def close(self):
        pass

    def flush(self):
        pass

    def seek(self, n, mode=0):
        pass

    def readline(self):
        """Pop and return the oldest buffered chunk, or ``""``."""
        if not self._buffer:
            return ""
        return self._buffer.pop(0)

    def reset(self):
        """Return everything buffered so far and clear the buffer."""
        contents = "".join(self._buffer)
        del self._buffer[:]
        return contents

    def _write(self, x):
        # Decode bytes leniently so broken output cannot crash the console.
        if isinstance(x, bytes):
            x = x.decode("utf-8", "replace")
        self._buffer.append(x)

    def write(self, x):
        self._write(escape(x))

    def writelines(self, x):
        self._write(escape("".join(x)))
class ThreadedStream(object):
    """Thread-local wrapper for sys.stdout for the interactive console."""

    @staticmethod
    def push():
        """Install a fresh :class:`HTMLStringO` for the current context."""
        if not isinstance(sys.stdout, ThreadedStream):
            # Replace sys.stdout only once per process; per-context output
            # is routed through _local.stream by __getattribute__ below.
            sys.stdout = ThreadedStream()
        _local.stream = HTMLStringO()

    @staticmethod
    def fetch():
        """Return and clear everything written in the current context."""
        try:
            stream = _local.stream
        except AttributeError:
            # No stream was pushed for this context.
            return ""
        return stream.reset()

    @staticmethod
    def displayhook(obj):
        """``sys.displayhook`` replacement rendering values as HTML."""
        try:
            stream = _local.stream
        except AttributeError:
            # Not inside a console evaluation: defer to the original hook.
            return _displayhook(obj)
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            # Mirror the REPL convention of binding the result to ``_``.
            _local._current_ipy.locals["_"] = obj
            stream._write(debug_repr(obj))

    def __setattr__(self, name, value):
        raise AttributeError("read only attribute %s" % name)

    def __dir__(self):
        return dir(sys.__stdout__)

    def __getattribute__(self, name):
        # Proxy everything to the context-local stream, falling back to
        # the real stdout outside of console evaluations.
        if name == "__members__":
            return dir(sys.__stdout__)
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self):
        return repr(sys.__stdout__)
# Install the HTML display hook once at import time, keeping a reference
# to the original hook so ThreadedStream.displayhook can fall back to it
# outside of console evaluations.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter that renders prompts and output as HTML snippets.

    Maintains a buffer of pending source lines so multi-line statements
    can be entered one line at a time, like the regular REPL.
    """

    def __init__(self, globals, locals):
        # Merge globals into locals so the interpreter runs against a
        # single flat namespace.
        _locals = dict(globals)
        _locals.update(locals)
        locals = _locals
        # Expose the debugger helpers in the console namespace.
        locals["dump"] = dump
        locals["help"] = helper
        locals["__loader__"] = self.loader = _ConsoleLoader()
        code.InteractiveInterpreter.__init__(self, locals)
        self.more = False
        self.buffer = []
        _wrap_compiler(self)

    def runsource(self, source):
        """Run *source*; return prompt + escaped echo + captured output."""
        source = source.rstrip() + "\n"
        ThreadedStream.push()
        prompt = "... " if self.more else ">>> "
        try:
            source_to_eval = "".join(self.buffer + [source])
            if code.InteractiveInterpreter.runsource(
                self, source_to_eval, "<debugger>", "single"
            ):
                # Statement is incomplete: keep buffering lines.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return prompt + escape(source) + output

    def runcode(self, code):
        try:
            exec(code, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self):
        from .tbtools import get_current_traceback

        # skip=1 drops this runcode/showtraceback frame from the output.
        tb = get_current_traceback(skip=1)
        sys.stdout._write(tb.render_summary())

    def showsyntaxerror(self, filename=None):
        from .tbtools import get_current_traceback

        # skip=4 presumably drops the internal compile frames leading to
        # the syntax error — TODO confirm against the call chain.
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())

    def write(self, data):
        sys.stdout.write(data)
class Console(object):
    """An interactive console bound to a global/local namespace."""

    def __init__(self, globals=None, locals=None):
        if locals is None:
            locals = {}
        if globals is None:
            globals = {}
        self._ipy = _InteractiveConsole(globals, locals)

    def eval(self, code):
        """Run *code* in the console, returning the rendered HTML."""
        _local._current_ipy = self._ipy
        old_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource pushes a ThreadedStream over sys.stdout; restore.
            sys.stdout = old_stdout
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/tbtools.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import inspect
import json
import os
import re
import sys
import sysconfig
import traceback
from tokenize import TokenError
from .._compat import PY2
from .._compat import range_type
from .._compat import reraise
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from .._compat import to_unicode
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from ..utils import escape
from .console import Console
# Matches a PEP 263 ``coding:`` cookie in the leading source lines.
_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
# Splits raw source bytes into lines without consuming newlines.
_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
# Heuristic for lines that start a definition (def / lambda / decorator).
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
# UTF-8 byte order mark, stripped before decoding source files.
UTF8_COOKIE = b"\xef\xbb\xbf"
# Exceptions that should normally propagate instead of being rendered.
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
    system_exceptions += (GeneratorExit,)
except NameError:
    # GeneratorExit does not exist on very old interpreters.
    pass
# Shared page skeleton; the %(...)s slots are filled by
# Traceback.render_full() and render_console_html().
HEADER = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not by accident trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&cmd=resource&f=console.png">
<script src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
# Page footer plus the hidden console PIN prompt overlay.
FOOTER = u"""\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
# Full error page: exception heading, frame summary and the copy/paste
# friendly plaintext form.
PAGE_HTML = (
    HEADER
    + u"""\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
    + FOOTER
    + """
<!--
%(plaintext_cs)s
-->
"""
)
# Standalone interactive console page.
CONSOLE_HTML = (
    HEADER
    + u"""\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
    + FOOTER
)
# Wrapper around the list of frames of one traceback.
SUMMARY_HTML = u"""\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
# One frame in the HTML traceback.
FRAME_HTML = u"""\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
# One source line row in a frame's context table.
SOURCE_LINE_HTML = u"""\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret, evalex_trusted=True):
    """Render the standalone interactive-console page.

    :param secret: the debugger secret embedded in the page.
    :param evalex_trusted: whether code execution is unlocked.
    """
    context = {
        "evalex": "true",
        "evalex_trusted": "true" if evalex_trusted else "false",
        "console": "true",
        "title": "Console",
        "secret": secret,
        "traceback_id": -1,
    }
    return CONSOLE_HTML % context
def get_current_traceback(
    ignore_system_exceptions=False, show_hidden_frames=False, skip=0
):
    """Get the current exception info as `Traceback` object.

    :param ignore_system_exceptions: when true, system exceptions such as
        ``SystemExit``, ``KeyboardInterrupt`` (and ``GeneratorExit``)
        are re-raised instead of being captured.
    :param show_hidden_frames: keep frames marked hidden by the paste
        ``__traceback_hide__`` spec instead of filtering them out.
    :param skip: number of innermost traceback frames to drop.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if ignore_system_exceptions and exc_type in system_exceptions:
        reraise(exc_type, exc_value, tb)
    for _ in range_type(skip):
        # Never skip past the last frame.
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        tb.filter_hidden_frames()
    return tb
class Line(object):
    """A single annotated source line for the traceback renderer."""

    __slots__ = ("lineno", "code", "in_frame", "current")

    def __init__(self, lineno, code):
        self.lineno = lineno
        self.code = code
        self.in_frame = False
        self.current = False

    @property
    def classes(self):
        """CSS classes for this line, reflecting its highlight state."""
        names = ["line"]
        if self.in_frame:
            names.append("in-frame")
        if self.current:
            names.append("current")
        return names

    def render(self):
        """Render the line as an HTML table row."""
        context = {
            "classes": u" ".join(self.classes),
            "lineno": self.lineno,
            "code": escape(self.code),
        }
        return SOURCE_LINE_HTML % context
class Traceback(object):
    """Wraps a traceback."""

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        exception_type = exc_type.__name__
        if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
            # Qualify non-builtin exception names with their module.
            exception_type = exc_type.__module__ + "." + exception_type
        self.exception_type = exception_type
        self.groups = []
        # memo guards against __cause__/__context__ cycles.
        memo = set()
        while True:
            self.groups.append(Group(exc_type, exc_value, tb))
            memo.add(id(exc_value))
            if PY2:
                # Python 2 has no chained exceptions.
                break
            exc_value = exc_value.__cause__ or exc_value.__context__
            if exc_value is None or id(exc_value) in memo:
                break
            exc_type = type(exc_value)
            tb = exc_value.__traceback__
        # Oldest exception first, matching Python's own rendering order.
        self.groups.reverse()
        self.frames = [frame for group in self.groups for frame in group.frames]

    def filter_hidden_frames(self):
        """Remove the frames according to the paste spec."""
        for group in self.groups:
            group.filter_hidden_frames()
        self.frames[:] = [frame for group in self.groups for frame in group.frames]

    @property
    def is_syntax_error(self):
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)

    @property
    def exception(self):
        """String representation of the final exception."""
        return self.groups[-1].exception

    def log(self, logfile=None):
        """Log the ASCII traceback into a file object."""
        if logfile is None:
            logfile = sys.stderr
        tb = self.plaintext.rstrip() + u"\n"
        logfile.write(to_native(tb, "utf-8", "replace"))

    def paste(self):
        """Create a paste (a GitHub gist) and return its url and id."""
        data = json.dumps(
            {
                "description": "Werkzeug Internal Server Error",
                "public": False,
                "files": {"traceback.txt": {"content": self.plaintext}},
            }
        ).encode("utf-8")
        try:
            from urllib2 import urlopen
        except ImportError:
            from urllib.request import urlopen
        rv = urlopen("https://api.github.com/gists", data=data)
        resp = json.loads(rv.read().decode("utf-8"))
        rv.close()
        return {"url": resp["html_url"], "id": resp["id"]}

    def render_summary(self, include_title=True):
        """Render the traceback for the interactive console."""
        title = ""
        classes = ["traceback"]
        if not self.frames:
            classes.append("noframe-traceback")
            frames = []
        else:
            # Mark library frames only when the traceback mixes library
            # and application frames.
            library_frames = sum(frame.is_library for frame in self.frames)
            mark_lib = 0 < library_frames < len(self.frames)
            frames = [group.render(mark_lib=mark_lib) for group in self.groups]
        if include_title:
            if self.is_syntax_error:
                title = u"Syntax Error"
            else:
                title = u"Traceback <em>(most recent call last)</em>:"
        if self.is_syntax_error:
            description_wrapper = u"<pre class=syntaxerror>%s</pre>"
        else:
            description_wrapper = u"<blockquote>%s</blockquote>"
        return SUMMARY_HTML % {
            "classes": u" ".join(classes),
            "title": u"<h3>%s</h3>" % title if title else u"",
            "frames": u"\n".join(frames),
            "description": description_wrapper % escape(self.exception),
        }

    def render_full(self, evalex=False, secret=None, evalex_trusted=True):
        """Render the Full HTML page with the traceback info."""
        exc = escape(self.exception)
        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": exc,
            "exception": exc,
            "exception_type": escape(self.exception_type),
            "summary": self.render_summary(include_title=False),
            "plaintext": escape(self.plaintext),
            # Collapse runs of dashes so the plaintext cannot terminate
            # the HTML comment it is embedded in.
            "plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
            "traceback_id": self.id,
            "secret": secret,
        }

    @cached_property
    def plaintext(self):
        """The whole traceback rendered as plain text."""
        return u"\n".join([group.render_text() for group in self.groups])

    @property
    def id(self):
        """Identifier the JavaScript side uses to address this traceback."""
        return id(self)
class Group(object):
    """A group of frames for an exception in a traceback.  On Python 3,
    if the exception has a ``__cause__`` or ``__context__``, there are
    multiple exception groups.
    """

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        # Divider text shown before chained exceptions, mirroring
        # CPython's own wording.
        self.info = None
        if not PY2:
            if exc_value.__cause__ is not None:
                self.info = (
                    u"The above exception was the direct cause of the"
                    u" following exception"
                )
            elif exc_value.__context__ is not None:
                self.info = (
                    u"During handling of the above exception, another"
                    u" exception occurred"
                )
        self.frames = []
        while tb is not None:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next

    def filter_hidden_frames(self):
        """Remove frames according to paste's ``__traceback_hide__`` spec."""
        new_frames = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ("before", "before_and_this"):
                # Drop everything collected so far.
                new_frames = []
                hidden = False
                if hide == "before_and_this":
                    continue
            elif hide in ("reset", "reset_and_this"):
                hidden = False
                if hide == "reset_and_this":
                    continue
            elif hide in ("after", "after_and_this"):
                hidden = True
                if hide == "after_and_this":
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)
        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        if len(new_frames) == 1 and self.frames[0].module == "codeop":
            del self.frames[:]
        # if the last frame is missing something went terribly wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames

    @property
    def exception(self):
        """String representation of the exception."""
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        rv = "".join(buf).strip()
        return to_unicode(rv, "utf-8", "replace")

    def render(self, mark_lib=True):
        """Render this group's frames as HTML list items."""
        out = []
        if self.info is not None:
            out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
        for frame in self.frames:
            out.append(
                u"<li%s>%s"
                % (
                    u' title="%s"' % escape(frame.info) if frame.info else u"",
                    frame.render(mark_lib=mark_lib),
                )
            )
        return u"\n".join(out)

    def render_text(self):
        """Render this group as plain text, like the interpreter would."""
        out = []
        if self.info is not None:
            out.append(u"\n%s:\n" % self.info)
        out.append(u"Traceback (most recent call last):")
        for frame in self.frames:
            out.append(frame.render_text())
        out.append(self.exception)
        return u"\n".join(out)
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
    """Capture location, namespaces and paste-spec metadata for one
    traceback frame."""
    self.lineno = tb.tb_lineno
    self.function_name = tb.tb_frame.f_code.co_name
    self.locals = tb.tb_frame.f_locals
    self.globals = tb.tb_frame.f_globals
    fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
    if fn[-4:] in (".pyo", ".pyc"):
        # Point at the .py source instead of the compiled artifact.
        fn = fn[:-1]
    # if it's a file on the file system resolve the real filename.
    if os.path.isfile(fn):
        fn = os.path.realpath(fn)
    self.filename = to_unicode(fn, get_filesystem_encoding())
    self.module = self.globals.get("__name__", self.locals.get("__name__"))
    self.loader = self.globals.get("__loader__", self.locals.get("__loader__"))
    self.code = tb.tb_frame.f_code
    # support for paste's traceback extensions
    self.hide = self.locals.get("__traceback_hide__", False)
    info = self.locals.get("__traceback_info__")
    if info is not None:
        info = to_unicode(info, "utf-8", "replace")
    self.info = info
def render(self, mark_lib=True):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
"id": self.id,
"filename": escape(self.filename),
"lineno": self.lineno,
"function_name": escape(self.function_name),
"lines": self.render_line_context(),
"library": "library" if mark_lib and self.is_library else "",
}
@cached_property
def is_library(self):
return any(
self.filename.startswith(path) for path in sysconfig.get_paths().values()
)
def render_text(self):
return u' File "%s", line %s, in %s\n %s' % (
self.filename,
self.lineno,
self.function_name,
self.current_line.strip(),
)
def render_line_context(self):
before, current, after = self.get_context_lines()
rv = []
def render_line(line, cls):
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
rv.append(
'<pre class="line %s"><span class="ws">%s</span>%s</pre>'
% (cls, " " * prefix, escape(stripped_line) or " ")
)
for line in before:
render_line(line, "before")
render_line(current, "current")
for line in after:
render_line(line, "after")
return "\n".join(rv)
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, "co_firstlineno"):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno : lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def eval(self, code, mode="single"):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type): # noqa
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, "get_source"):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, "get_source_by_code"):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
with open(
to_native(self.filename, get_filesystem_encoding()), mode="rb"
) as f:
source = f.read()
except IOError:
return []
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = "utf-8"
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
charset = to_native(charset)
try:
codecs.lookup(charset)
except LookupError:
charset = "utf-8"
return source.decode(charset, "replace").splitlines()
def get_context_lines(self, context=5):
before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
past = self.sourcelines[self.lineno : self.lineno + context]
return (before, self.current_line, past)
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u""
@cached_property
def console(self):
return Console(self.globals, self.locals)
@property
def id(self):
return id(self)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/__init__.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .tbtools import get_current_traceback
from .tbtools import render_console_html
# A week
PIN_TIME = 7 * 24 * 60 * 60


def hash_pin(pin):
    """Return the 12-character hex digest under which a pin is stored
    in the trust cookie.  Text input is UTF-8 encoded first.
    """
    raw = pin.encode("utf-8", "replace") if isinstance(pin, text_type) else pin
    return hashlib.md5(raw + b"shittysalt").hexdigest()[:12]
# cached result of get_machine_id(); computed at most once per process
_machine_id = None


def get_machine_id():
    """Return a best-effort, semi-stable identifier for this machine.

    Used to derive the debugger pin and cookie name so they stay the
    same across restarts.  Tries, in order: Linux machine/boot ids
    (plus cgroup info for containers), the macOS serial number via
    ``ioreg``, and the Windows ``MachineGuid`` registry value.  May
    return bytes, text, or ``None``; the result is cached.
    """
    global _machine_id

    if _machine_id is not None:
        return _machine_id

    def _generate():
        linux = b""

        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except IOError:
                continue

            if value:
                linux += value
                break

        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except IOError:
            pass

        if linux:
            return linux

        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)

            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass

        # On Windows, use winreg to get the machine guid.
        try:
            import winreg as wr
        except ImportError:
            try:
                import _winreg as wr  # Python 2 name
            except ImportError:
                wr = None

        if wr is not None:
            try:
                with wr.OpenKey(
                    wr.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    wr.KEY_READ | wr.KEY_WOW64_64KEY,
                ) as rk:
                    guid, guid_type = wr.QueryValueEx(rk, "MachineGuid")

                    if guid_type == wr.REG_SZ:
                        return guid.encode("utf-8")

                    return guid
            except WindowsError:
                pass

        # NOTE(review): falls through to None when no source is available

    _machine_id = _generate()
    return _machine_id
class _ConsoleFrame(object):
    """Adapter that mimics a traceback frame so the frame-console
    machinery can be reused for the standalone console.
    """

    def __init__(self, namespace):
        # frame id 0 is reserved for the standalone console
        self.id = 0
        self.console = Console(namespace)
def get_pin_and_cookie_name(app):
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key.  The hope is that this is stable between
    restarts to not make debugging particularly frustrating.  If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdigit():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    modname = getattr(app, "__module__", app.__class__.__module__)

    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    except (ImportError, KeyError):
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", app.__class__.__name__),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    h = hashlib.md5()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, text_type):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = "__wzd" + h.hexdigest()[:20]

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = ("%09d" % int(h.hexdigest(), 16))[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            # no group size divides evenly; use the raw digits
            rv = num

    return rv, cookie_name
class DebuggedApplication(object):
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The `evalex` keyword argument allows evaluating expressions in a
    traceback's frame context.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging).  This requires a non-forking server.
    :param request_key: The key that points to the request object in the
                        environment.  This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console.  The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    :param pin_security: can be used to disable the pin based security system.
    :param pin_logging: enables the logging of the pin system.
    """

    def __init__(
        self,
        app,
        evalex=False,
        request_key="werkzeug.request",
        console_path="/console",
        console_init_func=None,
        show_hidden_frames=False,
        pin_security=True,
        pin_logging=True,
    ):
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # frame id -> Frame, traceback id -> Traceback, kept across requests
        self.frames = {}
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # per-process secret required for all debugger commands
        self.secret = gen_salt(20)
        self._failed_pin_auth = 0

        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
                if self.pin is None:
                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
                else:
                    _log("info", " * Debugger PIN: %s" % self.pin)
        else:
            self.pin = None

    @property
    def pin(self):
        """The debugger pin, computed lazily (``None`` when disabled)."""
        if not hasattr(self, "_pin"):
            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
        return self._pin

    @pin.setter
    def pin(self, value):
        self._pin = value

    @property
    def pin_cookie_name(self):
        """The name of the pin cookie."""
        if not hasattr(self, "_pin_cookie"):
            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
        return self._pin_cookie

    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, "close"):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, "close"):
                app_iter.close()
            traceback = get_current_traceback(
                skip=1,
                show_hidden_frames=self.show_hidden_frames,
                ignore_system_exceptions=True,
            )
            # remember the frames so the console endpoints can find them
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback

            try:
                start_response(
                    "500 INTERNAL SERVER ERROR",
                    [
                        ("Content-Type", "text/html; charset=utf-8"),
                        # Disable Chrome's XSS protection, the debug
                        # output can cause false-positives.
                        ("X-XSS-Protection", "0"),
                    ],
                )
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )
            else:
                is_trusted = bool(self.check_pin_trust(environ))
                yield traceback.render_full(
                    evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
                ).encode("utf-8", "replace")

            traceback.log(environ["wsgi.errors"])

    def execute_command(self, request, command, frame):
        """Execute a command in a console."""
        return Response(frame.console.eval(command), mimetype="text/html")

    def display_console(self, request):
        """Display a standalone shell."""
        if 0 not in self.frames:
            if self.console_init_func is None:
                ns = {}
            else:
                ns = dict(self.console_init_func())
            ns.setdefault("app", self.app)
            self.frames[0] = _ConsoleFrame(ns)
        is_trusted = bool(self.check_pin_trust(request.environ))
        return Response(
            render_console_html(secret=self.secret, evalex_trusted=is_trusted),
            mimetype="text/html",
        )

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        rv = traceback.paste()
        return Response(json.dumps(rv), mimetype="application/json")

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() prevents path traversal out of the shared directory
        filename = join("shared", basename(filename))
        try:
            data = pkgutil.get_data(__package__, filename)
        except OSError:
            data = None
        if data is not None:
            mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
            return Response(data, mimetype=mimetype)
        return Response("Not Found", status=404)

    def check_pin_trust(self, environ):
        """Checks if the request passed the pin test.  This returns `True` if the
        request is trusted on a pin/cookie basis and returns `False` if not.
        Additionally if the cookie's stored pin hash is wrong it will return
        `None` so that appropriate action can be taken.
        """
        if self.pin is None:
            return True
        val = parse_cookie(environ).get(self.pin_cookie_name)
        if not val or "|" not in val:
            return False
        ts, pin_hash = val.split("|", 1)
        if not ts.isdigit():
            return False
        if pin_hash != hash_pin(self.pin):
            return None
        # the cookie is only honored for PIN_TIME seconds after it was set
        return (time.time() - PIN_TIME) < int(ts)

    def _fail_pin_auth(self):
        """Record a failed pin attempt; throttles harder after 5 failures."""
        time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
        self._failed_pin_auth += 1

    def pin_auth(self, request):
        """Authenticates with the pin."""
        exhausted = False
        auth = False
        trust = self.check_pin_trust(request.environ)

        # If the trust return value is `None` it means that the cookie is
        # set but the stored pin hash value is bad.  This means that the
        # pin was changed.  In this case we count a bad auth and unset the
        # cookie.  This way it becomes harder to guess the cookie name
        # instead of the pin as we still count up failures.
        bad_cookie = False
        if trust is None:
            self._fail_pin_auth()
            bad_cookie = True

        # If we're trusted, we're authenticated.
        elif trust:
            auth = True

        # If we failed too many times, then we're locked out.
        elif self._failed_pin_auth > 10:
            exhausted = True

        # Otherwise go through pin based authentication
        else:
            # bugfix: default to "" — a missing "pin" parameter previously
            # made ``.get("pin")`` return None and crash on ``.strip()``.
            entered_pin = request.args.get("pin", "")

            if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
                self._failed_pin_auth = 0
                auth = True
            else:
                self._fail_pin_auth()

        rv = Response(
            json.dumps({"auth": auth, "exhausted": exhausted}),
            mimetype="application/json",
        )

        if auth:
            # remember successful auth for PIN_TIME via a signed-ish cookie
            rv.set_cookie(
                self.pin_cookie_name,
                "%s|%s" % (int(time.time()), hash_pin(self.pin)),
                httponly=True,
            )
        elif bad_cookie:
            rv.delete_cookie(self.pin_cookie_name)
        return rv

    def log_pin_request(self):
        """Log the pin if needed."""
        if self.pin_logging and self.pin is not None:
            _log(
                "info", " * To enable the debugger you need to enter the security pin:"
            )
            _log("info", " * Debugger pin code: %s" % self.pin)
        return Response("")

    def __call__(self, environ, start_response):
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        response = self.debug_application
        if request.args.get("__debugger__") == "yes":
            cmd = request.args.get("cmd")
            arg = request.args.get("f")
            secret = request.args.get("s")
            traceback = self.tracebacks.get(request.args.get("tb", type=int))
            frame = self.frames.get(request.args.get("frm", type=int))
            if cmd == "resource" and arg:
                response = self.get_resource(request, arg)
            elif cmd == "paste" and traceback is not None and secret == self.secret:
                response = self.paste_traceback(request, traceback)
            elif cmd == "pinauth" and secret == self.secret:
                response = self.pin_auth(request)
            elif cmd == "printpin" and secret == self.secret:
                response = self.log_pin_request()
            elif (
                self.evalex
                and cmd is not None
                and frame is not None
                and self.secret == secret
                and self.check_pin_trust(environ)
            ):
                # console commands require a trusted (pin-authenticated) client
                response = self.execute_command(request, cmd, frame)
        elif (
            self.evalex
            and self.console_path is not None
            and request.path == self.console_path
        ):
            response = self.display_console(request)
        return response(environ, start_response)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/repr.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.repr
~~~~~~~~~~~~~~~~~~~
This module implements object representations for debugging purposes.
Unlike the default repr these reprs expose a lot more information and
produce HTML instead of ASCII.
Together with the CSS and JavaScript files of the debugger this gives
a colorful and more compact output.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
import sys
from collections import deque
from traceback import format_exception_only
from .._compat import integer_types
from .._compat import iteritems
from .._compat import PY2
from .._compat import string_types
from .._compat import text_type
from ..utils import escape
# sentinel: distinguishes "no argument passed" from an explicit ``None``
missing = object()
# splits text into paragraphs at blank lines (any newline convention)
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
# type of compiled regex objects; not exposed under a builtin name
RegexType = type(_paragraph_re)

# HTML template for rendering ``help()`` output in the debugger console
HELP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
"""
# HTML template for rendering an object dump (see ``dump``)
OBJECT_DUMP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
"""
def debug_repr(obj):
    """Creates a debug repr of an object as HTML unicode string.

    Thin convenience wrapper around :class:`DebugReprGenerator`.
    """
    return DebugReprGenerator().repr(obj)
def dump(obj=missing):
    """Print the object details to stdout._write (for the interactive
    console of the web debugger).
    """
    gen = DebugReprGenerator()
    if obj is missing:
        # called with no argument: dump the caller's local variables
        rv = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rv = gen.dump_object(obj)
    # the debugger patches sys.stdout with an object exposing ``_write``
    sys.stdout._write(rv)
class _Helper(object):
    """Displays an HTML version of the normal help, for the interactive
    debugger only because it requires a patched sys.stdout.
    """

    def __repr__(self):
        return "Type help(object) for help about object."

    def __call__(self, topic=None):
        """Render help about *topic* as HTML; without a topic, show a
        usage hint."""
        if topic is None:
            sys.stdout._write("<span class=help>%s</span>" % repr(self))
            return
        import pydoc

        # pydoc writes through the patched stdout; capture it via reset()
        pydoc.help(topic)
        rv = sys.stdout.reset()
        if isinstance(rv, bytes):
            rv = rv.decode("utf-8", "ignore")
        # the first paragraph becomes the box title, the rest its body
        paragraphs = _paragraph_re.split(rv)
        if len(paragraphs) > 1:
            title = paragraphs[0]
            text = "\n\n".join(paragraphs[1:])
        else:  # pragma: no cover
            title = "Help"
            text = paragraphs[0]

        sys.stdout._write(HELP_HTML % {"title": title, "text": text})
# module-level singleton; ``DebugReprGenerator.dispatch_repr`` checks
# identity against this exact object
helper = _Helper()
def _add_subclass_info(inner, obj, base):
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ""
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
module = '<span class="module">%s.</span>' % obj.__class__.__module__
return "%s%s(%s)" % (module, obj.__class__.__name__, inner)
class DebugReprGenerator(object):
    """Produces the debugger's rich HTML reprs with cycle protection."""

    def __init__(self):
        # stack of objects currently being repr'd; used to detect cycles
        self._stack = []

    def _sequence_repr_maker(left, right, base=object(), limit=8):  # noqa: B008, B902
        """Build a repr method for a sequence type delimited by
        *left*/*right*; items past *limit* are hidden behind a toggle."""

        def proxy(self, obj, recursive):
            if recursive:
                # cycle: render an ellipsis instead of recursing forever
                return _add_subclass_info(left + "..." + right, obj, base)
            buf = [left]
            have_extended_section = False
            for idx, item in enumerate(obj):
                if idx:
                    buf.append(", ")
                if idx == limit:
                    buf.append('<span class="extended">')
                    have_extended_section = True
                buf.append(self.repr(item))
            if have_extended_section:
                buf.append("</span>")
            buf.append(right)
            return _add_subclass_info(u"".join(buf), obj, base)

        return proxy

    list_repr = _sequence_repr_maker("[", "]", list)
    tuple_repr = _sequence_repr_maker("(", ")", tuple)
    set_repr = _sequence_repr_maker("set([", "])", set)
    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
    deque_repr = _sequence_repr_maker(
        '<span class="module">collections.' "</span>deque([", "])", deque
    )
    # the factory is only needed during class creation
    del _sequence_repr_maker

    def regex_repr(self, obj):
        """Render a compiled regex as a ``re.compile(r'...')`` call."""
        pattern = repr(obj.pattern)
        if PY2:
            pattern = pattern.decode("string-escape", "ignore")
        else:
            pattern = codecs.decode(pattern, "unicode-escape", "ignore")
        # present the pattern as a raw string literal
        if pattern[:1] == "u":
            pattern = "ur" + pattern[1:]
        else:
            pattern = "r" + pattern
        return u're.compile(<span class="string regex">%s</span>)' % pattern

    def string_repr(self, obj, limit=70):
        """Render a string repr, collapsing anything past *limit* chars
        behind an expandable section."""
        buf = ['<span class="string">']
        r = repr(obj)

        # shorten the repr when the hidden part would be at least 3 chars
        if len(r) - limit > 2:
            buf.extend(
                (
                    escape(r[:limit]),
                    '<span class="extended">',
                    escape(r[limit:]),
                    "</span>",
                )
            )
        else:
            buf.append(escape(r))

        buf.append("</span>")
        out = u"".join(buf)

        # if the repr looks like a standard string, add subclass info if needed
        if r[0] in "'\"" or (r[0] in "ub" and r[1] in "'\""):
            return _add_subclass_info(out, obj, (bytes, text_type))

        # otherwise, assume the repr distinguishes the subclass already
        return out

    def dict_repr(self, d, recursive, limit=5):
        """Render a dict; at most *limit* pairs shown expanded."""
        if recursive:
            return _add_subclass_info(u"{...}", d, dict)
        buf = ["{"]
        have_extended_section = False
        for idx, (key, value) in enumerate(iteritems(d)):
            if idx:
                buf.append(", ")
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append(
                '<span class="pair"><span class="key">%s</span>: '
                '<span class="value">%s</span></span>'
                % (self.repr(key), self.repr(value))
            )
        if have_extended_section:
            buf.append("</span>")
        buf.append("}")
        return _add_subclass_info(u"".join(buf), d, dict)

    def object_repr(self, obj):
        """Fallback: render the object's plain ``repr()``, escaped."""
        r = repr(obj)
        if PY2:
            r = r.decode("utf-8", "replace")
        return u'<span class="object">%s</span>' % escape(r)

    def dispatch_repr(self, obj, recursive):
        """Pick the specialized repr method based on the object's type."""
        if obj is helper:
            return u'<span class="help">%r</span>' % helper
        if isinstance(obj, (integer_types, float, complex)):
            return u'<span class="number">%r</span>' % obj
        if isinstance(obj, string_types) or isinstance(obj, bytes):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if deque is not None and isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)

    def fallback_repr(self):
        """Used when an object's own repr raised an exception."""
        try:
            info = "".join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:  # pragma: no cover
            info = "?"
        if PY2:
            info = info.decode("utf-8", "ignore")
        return u'<span class="brokenrepr"><broken repr (%s)>' u"</span>" % escape(
            info.strip()
        )

    def repr(self, obj):
        """Entry point: repr *obj*, tracking the stack for cycle detection
        and falling back gracefully on broken ``__repr__``s."""
        recursive = False
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj):
        """Render a full HTML dump of *obj*: its items (for string-keyed
        dicts) or its attributes (for everything else)."""
        repr = items = None
        if isinstance(obj, dict):
            title = "Contents of"
            items = []
            for key, value in iteritems(obj):
                if not isinstance(key, string_types):
                    # non-string keys: fall back to the attribute dump below
                    items = None
                    break
                items.append((key, self.repr(value)))
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    # skip attributes whose access raises
                    pass
            title = "Details for"
        title += " " + object.__repr__(obj)[1:-1]
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d):
        """Render an HTML dump of a locals dict *d*."""
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, "Local variables in frame")

    def render_object_dump(self, items, title, repr=None):
        """Fill the OBJECT_DUMP_HTML template with key/value rows."""
        html_items = []
        for key, value in items:
            html_items.append(
                "<tr><th>%s<td><pre class=repr>%s</pre>" % (escape(key), value)
            )
        if not html_items:
            html_items.append("<tr><td><em>Nothing</em>")
        return OBJECT_DUMP_HTML % {
            "title": escape(title),
            "repr": "<pre class=repr>%s</pre>" % repr if repr else "",
            "items": "\n".join(html_items),
        }
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/shared/FONT_LICENSE
|
-------------------------------
UBUNTU FONT LICENCE Version 1.0
-------------------------------
PREAMBLE
This licence allows the licensed fonts to be used, studied, modified and
redistributed freely. The fonts, including any derivative works, can be
bundled, embedded, and redistributed provided the terms of this licence
are met. The fonts and derivatives, however, cannot be released under
any other licence. The requirement for fonts to remain under this
licence does not require any document created using the fonts or their
derivatives to be published under this licence, as long as the primary
purpose of the document is not to be a vehicle for the distribution of
the fonts.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this licence and clearly marked as such. This may
include source files, build scripts and documentation.
"Original Version" refers to the collection of Font Software components
as received under this licence.
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to
a new environment.
"Copyright Holder(s)" refers to all individuals and companies who have a
copyright ownership of the Font Software.
"Substantially Changed" refers to Modified Versions which can be easily
identified as dissimilar to the Font Software by users of the Font
Software comparing the Original Version with the Modified Version.
To "Propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification and with or without charging
a redistribution fee), making available to the public, and in some
countries other activities as well.
PERMISSION & CONDITIONS
This licence does not grant any rights under trademark law and all such
rights are reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of the Font Software, to propagate the Font Software, subject to
the below conditions:
1) Each copy of the Font Software must contain the above copyright
notice and this licence. These can be included either as stand-alone
text files, human-readable headers or in the appropriate machine-
readable metadata fields within text or binary files as long as those
fields can be easily viewed by the user.
2) The font name complies with the following:
(a) The Original Version must retain its name, unmodified.
(b) Modified Versions which are Substantially Changed must be renamed to
avoid use of the name of the Original Version or similar names entirely.
(c) Modified Versions which are not Substantially Changed must be
renamed to both (i) retain the name of the Original Version and (ii) add
additional naming elements to distinguish the Modified Version from the
Original Version. The name of such Modified Versions must be the name of
the Original Version, with "derivative X" where X represents the name of
the new work, appended to that name.
3) The name(s) of the Copyright Holder(s) and any contributor to the
Font Software shall not be used to promote, endorse or advertise any
Modified Version, except (i) as required by this licence, (ii) to
acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
their explicit written permission.
4) The Font Software, modified or unmodified, in part or in whole, must
be distributed entirely under this licence, and must not be distributed
under any other licence. The requirement for fonts to remain under this
licence does not affect any document created using the Font Software,
except any version of the Font Software extracted from a document
created using the Font Software may only be distributed under this
licence.
TERMINATION
This licence becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
DEALINGS IN THE FONT SOFTWARE.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/shared/style.css
|
/* Stylesheet for the Werkzeug interactive debugger UI (traceback view,
   in-frame Python console and PIN prompt overlay).
   URLs of the form '?__debugger__=yes&cmd=resource&f=...' are served by
   the debugger middleware itself, not from static files. */

/* Heading font, delivered through the debugger's own resource endpoint. */
@font-face {
  font-family: 'Ubuntu';
  font-style: normal;
  font-weight: normal;
  src: local('Ubuntu'), local('Ubuntu-Regular'),
       url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
}

/* Base typography and generic elements. */
body, input { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
  'Verdana', sans-serif; color: #000; text-align: center;
  margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
  'Geneva', 'Verdana', sans-serif; font-weight: normal; }
input { background-color: #fff; margin: 0; text-align: left;
  outline: none !important; }
input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
textarea { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
  monospace; font-size: 14px; }

/* Overall page layout of the debugger screen. */
div.debugger { text-align: left; padding: 12px; margin: auto;
  background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail { cursor: pointer; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
  font-family: monospace; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
  color: #86989B; }
h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
  background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }

/* Traceback rendering; the "current" frame and expanded source context
   get distinct backgrounds. */
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
  background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
  background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback .library .current { background: white; color: #555; }
div.traceback .expanded .current { background: #E8EFF0; color: black; }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback div.source.expanded pre + pre { border-top: none; }

/* Surrounding source lines and whitespace markers stay hidden until a
   frame is expanded (class toggled by debugger.js). */
div.traceback span.ws { display: none; }
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
div.traceback div.source.expanded pre.before,
div.traceback div.source.expanded pre.after {
    display: block;
}
div.traceback div.source.expanded span.ws {
    display: inline;
}
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }

/* Per-frame console icon; only shown while hovering the frame. */
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
  border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }

/* The interactive console embedded in a stack frame. */
pre.console { border: 1px solid #ccc; background: white!important;
  color: black; padding: 5px!important;
  margin: 3px 0 0 0!important; cursor: default!important;
  max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
  width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
  'Bitstream Vera Sans Mono', monospace; font-size: 14px;
  border: none!important; }

/* Highlighting classes used by the console's repr output. */
span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }

/* Expand/collapse toggle for truncated reprs; icons come from the
   debugger resource endpoint as well. */
a.toggle { text-decoration: none; background-repeat: no-repeat;
  background-position: center center;
  background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }

/* Help boxes and nested tracebacks rendered inside the console. */
pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
  border: 1px solid #11557C; padding: 10px;
  font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
  'Verdana', sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
  background: #11557C; color: white; }
pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
  margin: 20px -10px -10px -10px;
  padding: 10px; border-top: 1px solid #BFDDE0;
  background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }

/* Wrap long code lines across legacy rendering engines. */
div.traceback pre, div.console pre {
  white-space: pre-wrap;       /* css-3 should we be so lucky... */
  white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
  white-space: -pre-wrap;      /* Opera 4-6 ?? */
  white-space: -o-pre-wrap;    /* Opera 7 ?? */
  word-wrap: break-word;       /* Internet Explorer 5.5+ */
  _white-space: pre;           /* IE only hack to re-specify in
                                  addition to word-wrap */
}

/* Full-screen overlay asking for the debugger PIN; hidden until
   promptForPin() in debugger.js fades it in. */
div.pin-prompt {
    position: absolute;
    display: none;
    top: 0;
    bottom: 0;
    left: 0;
    right: 0;
    background: rgba(255, 255, 255, 0.8);
}
div.pin-prompt .inner {
    background: #eee;
    padding: 10px 50px;
    width: 350px;
    margin: 10% auto 0 auto;
    border: 1px solid #ccc;
    border-radius: 2px;
}

/* Separator between chained exceptions in a traceback. */
div.exc-divider {
    margin: 0.7em 0 0 -1em;
    padding: 0.5em;
    background: #11557C;
    color: #ddd;
    border: 1px solid #ddd;
}
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/shared/debugger.js
|
/* Document-ready entry point for the debugger front-end.
   Relies on globals injected into the server-rendered page:
   EVALEX (console enabled), EVALEX_TRUSTED (PIN already verified),
   CONSOLE_MODE (stand-alone console page), TRACEBACK (traceback id)
   and SECRET (per-page token sent with every debugger request). */
$(function() {
  // Until the PIN is verified, wire up the PIN entry form.
  if (!EVALEX_TRUSTED) {
    initPinBox();
  }
  /**
   * if we are in console mode, show the console.
   */
  if (CONSOLE_MODE && EVALEX) {
    openShell(null, $('div.console div.inner').empty(), 0);
  }
  // Clicking the summary scrolls the full traceback into view.
  $("div.detail").click(function() {
    $("div.traceback").get(0).scrollIntoView(false);
  });
  // Per-frame behaviour: expand/collapse source, optional console icon.
  $('div.traceback div.frame').each(function() {
    var
      target = $('pre', this),
      consoleNode = null,
      // NOTE(review): ids appear to look like "frame-<id>"; substring(6)
      // drops that 6-char prefix to recover the frame id — confirm against
      // the server-side template.
      frameID = this.id.substring(6);
    target.click(function() {
      $(this).parent().toggleClass('expanded');
    });
    /**
     * Add an interactive console to the frames
     */
    if (EVALEX && target.is('.current')) {
      $('<img src="?__debugger__=yes&cmd=resource&f=console.png">')
        .attr('title', 'Open an interactive python shell in this frame')
        .click(function() {
          // Cache the console node so a second click toggles it instead
          // of creating a new one.
          consoleNode = openShell(consoleNode, target, frameID);
          return false;
        })
        .prependTo(target);
    }
  });
  /**
   * toggle traceback types on click.
   */
  $('h2.traceback').click(function() {
    $(this).next().slideToggle('fast');
    $('div.plain').slideToggle('fast');
  }).css('cursor', 'pointer');
  $('div.plain').hide();
  /**
   * Add extra info (this is here so that only users with JavaScript
   * enabled see it.)
   */
  $('span.nojavascript')
    .removeClass('nojavascript')
    .html('<p>To switch between the interactive traceback and the plaintext ' +
          'one, you can click on the "Traceback" headline. From the text ' +
          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
          'For code execution mouse-over the frame you want to debug and ' +
          'click on the console icon on the right side.' +
          '<p>You can execute arbitrary Python code in the stack frames and ' +
          'there are some extra helpers available for introspection:' +
          '<ul><li><code>dump()</code> shows all variables in the frame' +
          '<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));
  /**
   * Add the pastebin feature
   */
  // Posts the traceback (cmd=paste) via ajax and replaces the hint text
  // with a link to the created paste.
  $('div.plain form')
    .submit(function() {
      var label = $('input[type="submit"]', this);
      var old_val = label.val();
      label.val('submitting...');
      $.ajax({
        dataType: 'json',
        url: document.location.pathname,
        data: {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
               s: SECRET},
        success: function(data) {
          $('div.plain span.pastemessage')
            .removeClass('pastemessage')
            .text('Paste created: ')
            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
        },
        error: function() {
          alert('Error: Could not submit paste. No network connection?');
          // Restore the button label so the user can retry.
          label.val(old_val);
        }
      });
      return false;
    });
  // if we have javascript we submit by ajax anyways, so no need for the
  // not scaling textarea.
  var plainTraceback = $('div.plain textarea');
  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
});
/* Wires up the PIN entry form inside the div.pin-prompt overlay.
   Submits the PIN via ajax (cmd=pinauth); on success marks the session
   trusted and hides the overlay, otherwise alerts the user. */
function initPinBox() {
  $('.pin-prompt form').submit(function(evt) {
    evt.preventDefault();
    var pin = this.pin.value;
    var btn = this.btn;
    // Disable the submit button while the request is in flight so the
    // user cannot double-submit (attempts are counted server-side).
    btn.disabled = true;
    $.ajax({
      dataType: 'json',
      url: document.location.pathname,
      data: {__debugger__: 'yes', cmd: 'pinauth', pin: pin,
             s: SECRET},
      success: function(data) {
        btn.disabled = false;
        if (data.auth) {
          // Server accepted the PIN: remember it for this page and
          // dismiss the overlay.
          EVALEX_TRUSTED = true;
          $('.pin-prompt').fadeOut();
        } else {
          if (data.exhausted) {
            alert('Error: too many attempts. Restart server to retry.');
          } else {
            alert('Error: incorrect pin');
          }
        }
        // NOTE(review): leftover debug logging of the auth response;
        // consider removing.
        console.log(data);
      },
      error: function() {
        btn.disabled = false;
        alert('Error: Could not verify PIN. Network error?');
      }
    });
  });
}
/* Asks the server to print the debugger PIN on its console
   (cmd=printpin) and fades in the PIN entry overlay, focusing the
   input field.  Does nothing once the session is already trusted. */
function promptForPin() {
  if (EVALEX_TRUSTED) {
    return;
  }
  // Fire-and-forget request: the server logs the PIN for the developer.
  $.ajax({
    url: document.location.pathname,
    data: {__debugger__: 'yes', cmd: 'printpin', s: SECRET}
  });
  var overlay = $('.pin-prompt');
  overlay.fadeIn(function() {
    $('.pin-prompt input[name="pin"]').focus();
  });
}
/**
 * Helper function for shell initialization
 *
 * Opens (or toggles) the interactive console attached to `target`.
 * `consoleNode` is the console element cached from a previous call
 * (null on first open); `frameID` selects the server-side stack frame
 * commands are evaluated in.  Returns the console element so callers
 * can cache it for toggling.
 */
function openShell(consoleNode, target, frameID) {
  // Make sure the PIN has been verified before allowing evaluation.
  promptForPin();
  if (consoleNode)
    return consoleNode.slideToggle('fast');
  // NOTE: no semicolon after .hide() — relies on automatic semicolon
  // insertion before the following `var` statement.
  consoleNode = $('<pre class="console">')
    .appendTo(target.parent())
    .hide()
  // Command history; the last entry tracks the in-progress input line.
  var historyPos = 0, history = [''];
  var output = $('<div class="output">[console ready]</div>')
    .appendTo(consoleNode);
  // `command` is declared below with var (hoisted); the handler only
  // runs after the input exists, so the reference is safe at runtime.
  var form = $('<form>>>> </form>')
    .submit(function() {
      var cmd = command.val();
      // Evaluate `cmd` in frame `frameID` server-side; the response is
      // an HTML fragment appended to the output area.
      $.get('', {
          __debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) {
        var tmp = $('<div>').html(data);
        // Collapse "extended" repr spans behind a click-to-toggle link.
        $('span.extended', tmp).each(function() {
          var hidden = $(this).wrap('<span>').hide();
          hidden
            .parent()
            .append($('<a href="#" class="toggle"> </a>')
              .click(function() {
                hidden.toggle();
                $(this).toggleClass('open')
                return false;
              }));
        });
        output.append(tmp);
        command.focus();
        // Keep the newest output visible.
        consoleNode.scrollTop(consoleNode.get(0).scrollHeight);
        // Insert `cmd` before the trailing in-progress entry.
        var old = history.pop();
        history.push(cmd);
        if (typeof old != 'undefined')
          history.push(old);
        historyPos = history.length - 1;
      });
      command.val('');
      return false;
    }).
    appendTo(consoleNode);

  var command = $('<input type="text" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false">')
    .appendTo(form)
    .keydown(function(e) {
      // Ctrl+L clears the screen, like a terminal.
      if (e.key == 'l' && e.ctrlKey) {
        output.text('--- screen cleared ---');
        return false;
      }
      else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
        // handle up arrow and down arrow
        if (e.keyCode == 38 && historyPos > 0)
          historyPos--;
        // NOTE(review): the bound below lets historyPos reach
        // history.length, so history[historyPos] is undefined and the
        // input is cleared — possibly intended as "past newest entry",
        // confirm before tightening to history.length - 1.
        else if (e.keyCode == 40 && historyPos < history.length)
          historyPos++;
        command.val(history[historyPos]);
        return false;
      }
    });
  return consoleNode.slideDown('fast', function() {
    command.focus();
  });
}
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/werkzeug/debug/shared/jquery.js
|
/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}k.fn=k.prototype={jquery:f,constructor:k,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=k.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return k.each(this,e)},map:function(n){return this.pushStack(k.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},k.extend=k.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof 
a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||m(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(k.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||k.isPlainObject(n)?n:{},i=!1,a[t]=k.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},k.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t){b(e,{nonce:t&&t.nonce})},each:function(e,t){var n,r=0;if(d(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(p,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(d(Object(e))?k.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!==a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(d(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&a.push(i);return g.apply([],a)},guid:1,support:y}),"function"==typeof Symbol&&(k.fn[Symbol.iterator]=t[Symbol.iterator]),k.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var h=function(n){var e,d,b,o,i,h,f,g,w,u,l,T,C,a,E,v,s,c,y,k="sizzle"+1*new Date,m=n.document,S=0,r=0,p=ue(),x=ue(),N=ue(),A=ue(),D=function(e,t){return e===t&&(l=!0),0},j={}.hasOwnProperty,t=[],q=t.pop,L=t.push,H=t.push,O=t.slice,P=function(e,t){for(var 
n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},R="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",I="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",W="\\["+M+"*("+I+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+I+"))|)"+M+"*\\]",$=":("+I+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+W+")*)|.*)\\)|)",F=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),_=new RegExp("^"+M+"*,"+M+"*"),z=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var 
n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in 
e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return 
o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="<a id='"+k+"'></a><select id='"+k+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return 
n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0<se(t,C,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!==C&&T(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!==C&&T(e);var n=b.attrHandle[t.toLowerCase()],r=n&&j.call(b.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:d.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+"").replace(re,ie)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!d.detectDuplicates,u=!d.sortStable&&e.slice(0),e.sort(D),l){while(t=e[i++])t===e[i]&&(r=n.push(i));while(r--)e.splice(n[r],1)}return u=null,e},o=se.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else while(t=e[r++])n+=o(t);return n},(b=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(F," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,g,v){var y="nth"!==h.slice(0,3),m="last"!==h.slice(-4),x="of-type"===e;return 1===g&&0===v?function(e){return!!e.parentNode}:function(e,t,n){var 
r,i,o,a,s,u,l=y!==m?"nextSibling":"previousSibling",c=e.parentNode,f=x&&e.nodeName.toLowerCase(),p=!n&&!x,d=!1;if(c){if(y){while(l){a=e;while(a=a[l])if(x?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[m?c.firstChild:c.lastChild],m&&p){d=(s=(r=(i=(o=(a=c)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1])&&r[2],a=s&&c.childNodes[s];while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if(1===a.nodeType&&++d&&a===e){i[h]=[S,s,d];break}}else if(p&&(d=s=(r=(i=(o=(a=e)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1]),!1===d)while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if((x?a.nodeName.toLowerCase()===f:1===a.nodeType)&&++d&&(p&&((i=(o=a[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[S,d]),a===e))break;return(d-=v)===g||d%g==0&&0<=d/g}}},PSEUDO:function(e,o){var t,a=b.pseudos[e]||b.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[k]?a(o):1<a.length?(t=[e,e,"",o],b.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){var n,r=a(e,o),i=r.length;while(i--)e[n=P(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=f(e.replace(B,"$1"));return s[k]?le(function(e,t,n,r){var i,o=s(e,null,r,[]),a=e.length;while(a--)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(te,ne),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||"")||se.error("unsupported lang: "+n),n=n.replace(te,ne).toLowerCase(),function(e){var t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===a},focus:function(e){return 
e===C.activeElement&&(!C.hasFocus||C.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:ge(!1),disabled:ge(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!b.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Q.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ve(function(){return[0]}),last:ve(function(e,t){return[t-1]}),eq:ve(function(e,t,n){return[n<0?n+t:n]}),even:ve(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ve(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ve(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ve(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=b.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})b.pseudos[e]=de(e);for(e in{submit:!0,reset:!0})b.pseudos[e]=he(e);function me(){}function xe(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function be(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,p=r++;return e.first?function(e,t,n){while(e=e[u])if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[S,p];if(n){while(e=e[u])if((1===e.nodeType||f)&&s(e,t,n))return!0}else while(e=e[u])if(1===e.nodeType||f)if(i=(o=e[k]||(e[k]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===S&&r[1]===p)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){var 
r=i.length;while(r--)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Te(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Ce(d,h,g,v,y,e){return v&&!v[k]&&(v=Ce(v)),y&&!y[k]&&(y=Ce(y,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!d||!e&&h?c:Te(c,s,d,n,r),p=g?y||(e?d:l||v)?[]:t:f;if(g&&g(f,p,n,r),v){i=Te(p,u),v(i,[],n,r),o=i.length;while(o--)(a=i[o])&&(p[u[o]]=!(f[u[o]]=a))}if(e){if(y||d){if(y){i=[],o=p.length;while(o--)(a=p[o])&&i.push(f[o]=a);y(null,p=[],i,r)}o=p.length;while(o--)(a=p[o])&&-1<(i=y?P(e,a):s[o])&&(e[i]=!(t[i]=a))}}else p=Te(p===t?p.splice(l,p.length):p),y?y(null,t,p,r):H.apply(t,p)})}function Ee(e){for(var i,t,n,r=e.length,o=b.relative[e[0].type],a=o||b.relative[" "],s=o?1:0,u=be(function(e){return e===i},a,!0),l=be(function(e){return-1<P(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u(e,t,n):l(e,t,n));return i=null,r}];s<r;s++)if(t=b.relative[e[s].type])c=[be(we(c),t)];else{if((t=b.filter[e[s].type].apply(null,e[s].matches))[k]){for(n=++s;n<r;n++)if(b.relative[e[n].type])break;return Ce(1<s&&we(c),1<s&&xe(e.slice(0,s-1).concat({value:" "===e[s-2].type?"*":""})).replace(B,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&xe(e))}c.push(t)}return we(c)}return me.prototype=b.filters=b.pseudos,b.setFilters=new me,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=x[e+" "];if(l)return t?0:l.slice(0);a=e,s=[],u=b.preFilter;while(a){for(o in n&&!(r=_.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=z.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace(B," ")}),a=a.slice(n.length)),b.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):x(e,s).slice(0)},f=se.compile=function(e,t){var n,v,y,m,x,r,i=[],o=[],a=N[e+" 
"];if(!a){t||(t=h(e)),n=t.length;while(n--)(a=Ee(t[n]))[k]?i.push(a):o.push(a);(a=N(e,(v=o,m=0<(y=i).length,x=0<v.length,r=function(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],p=w,d=e||x&&b.find.TAG("*",i),h=S+=null==p?1:Math.random()||.1,g=d.length;for(i&&(w=t===C||t||i);l!==g&&null!=(o=d[l]);l++){if(x&&o){a=0,t||o.ownerDocument===C||(T(o),n=!E);while(s=v[a++])if(s(o,t||C,n)){r.push(o);break}i&&(S=h)}m&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,m&&l!==u){a=0;while(s=y[a++])s(c,f,t,n);if(e){if(0<u)while(l--)c[l]||f[l]||(f[l]=q.call(r));f=Te(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+y.length&&se.uniqueSort(r)}return i&&(S=h,w=p),c},m?le(r):r))).selector=e}return a},g=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&b.relative[o[1].type]){if(!(t=(b.find.ID(a.matches[0].replace(te,ne),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}i=G.needsContext.test(e)?0:o.length;while(i--){if(a=o[i],b.relative[s=a.type])break;if((u=b.find[s])&&(r=u(a.matches[0].replace(te,ne),ee.test(o[0].type)&&ye(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&xe(o)))return H.apply(n,r),n;break}}}return(l||f(e,c))(r,t,!E,n,!t||ee.test(e)&&ye(t.parentNode)||t),n},d.sortStable=k.split("").sort(D).join("")===k,d.detectDuplicates=!!l,T(),d.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(C.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),d.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return 
null==e.getAttribute("disabled")})||fe(R,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(C);k.find=h,k.expr=h.selectors,k.expr[":"]=k.expr.pseudos,k.uniqueSort=k.unique=h.uniqueSort,k.text=h.getText,k.isXMLDoc=h.isXML,k.contains=h.contains,k.escapeSelector=h.escape;var T=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&k(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},N=k.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var D=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1<i.call(n,e)!==r}):k.filter(n,e,r)}k.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?k.find.matchesSelector(r,e)?[r]:[]:k.find.matches(e,k.grep(t,function(e){return 1===e.nodeType}))},k.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(k(e).filter(function(){for(t=0;t<r;t++)if(k.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)k.find(e,i[t],n);return 1<r?k.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&N.test(e)?k(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof 
k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(k.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&k(e);if(!N.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?-1<a.index(n):1===n.nodeType&&k.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(1<o.length?k.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?i.call(k(e),this[0]):i.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(k.uniqueSort(k.merge(this.get(),k(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),k.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return T(e,"parentNode")},parentsUntil:function(e,t,n){return T(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return T(e,"nextSibling")},prevAll:function(e){return T(e,"previousSibling")},nextUntil:function(e,t,n){return T(e,"nextSibling",n)},prevUntil:function(e,t,n){return T(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return"undefined"!=typeof 
e.contentDocument?e.contentDocument:(A(e,"template")&&(e=e.content||e),k.merge([],e.childNodes))}},function(r,i){k.fn[r]=function(e,t){var n=k.map(this,i,e);return"Until"!==r.slice(-5)&&(t=e),t&&"string"==typeof t&&(n=k.filter(t,n)),1<this.length&&(O[r]||k.uniqueSort(n),H.test(r)&&n.reverse()),this.pushStack(n)}});var R=/[^\x20\t\r\n\f]+/g;function M(e){return e}function I(e){throw e}function W(e,t,n,r){var i;try{e&&m(i=e.promise)?i.call(e).done(t).fail(n):e&&m(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}k.Callbacks=function(r){var e,n;r="string"==typeof r?(e=r,n={},k.each(e.match(R)||[],function(e,t){n[t]=!0}),n):k.extend({},r);var i,t,o,a,s=[],u=[],l=-1,c=function(){for(a=a||r.once,o=i=!0;u.length;l=-1){t=u.shift();while(++l<s.length)!1===s[l].apply(t[0],t[1])&&r.stopOnFalse&&(l=s.length,t=!1)}r.memory||(t=!1),i=!1,a&&(s=t?[]:"")},f={add:function(){return s&&(t&&!i&&(l=s.length-1,u.push(t)),function n(e){k.each(e,function(e,t){m(t)?r.unique&&f.has(t)||s.push(t):t&&t.length&&"string"!==w(t)&&n(t)})}(arguments),t&&!i&&c()),this},remove:function(){return k.each(arguments,function(e,t){var n;while(-1<(n=k.inArray(t,s,n)))s.splice(n,1),n<=l&&l--}),this},has:function(e){return e?-1<k.inArray(e,s):0<s.length},empty:function(){return s&&(s=[]),this},disable:function(){return a=u=[],s=t="",this},disabled:function(){return!s},lock:function(){return a=u=[],t||i||(s=t=""),this},locked:function(){return!!a},fireWith:function(e,t){return a||(t=[e,(t=t||[]).slice?t.slice():t],u.push(t),i||c()),this},fire:function(){return f.fireWith(this,arguments),this},fired:function(){return!!o}};return f},k.extend({Deferred:function(e){var o=[["notify","progress",k.Callbacks("memory"),k.Callbacks("memory"),2],["resolve","done",k.Callbacks("once memory"),k.Callbacks("once memory"),0,"resolved"],["reject","fail",k.Callbacks("once memory"),k.Callbacks("once memory"),1,"rejected"]],i="pending",a={state:function(){return i},always:function(){return 
s.done(arguments).fail(arguments),this},"catch":function(e){return a.then(null,e)},pipe:function(){var i=arguments;return k.Deferred(function(r){k.each(o,function(e,t){var n=m(i[t[4]])&&i[t[4]];s[t[1]](function(){var e=n&&n.apply(this,arguments);e&&m(e.promise)?e.promise().progress(r.notify).done(r.resolve).fail(r.reject):r[t[0]+"With"](this,n?[e]:arguments)})}),i=null}).promise()},then:function(t,n,r){var u=0;function l(i,o,a,s){return function(){var n=this,r=arguments,e=function(){var e,t;if(!(i<u)){if((e=a.apply(n,r))===o.promise())throw new TypeError("Thenable self-resolution");t=e&&("object"==typeof e||"function"==typeof e)&&e.then,m(t)?s?t.call(e,l(u,o,M,s),l(u,o,I,s)):(u++,t.call(e,l(u,o,M,s),l(u,o,I,s),l(u,o,M,o.notifyWith))):(a!==M&&(n=void 0,r=[e]),(s||o.resolveWith)(n,r))}},t=s?e:function(){try{e()}catch(e){k.Deferred.exceptionHook&&k.Deferred.exceptionHook(e,t.stackTrace),u<=i+1&&(a!==I&&(n=void 0,r=[e]),o.rejectWith(n,r))}};i?t():(k.Deferred.getStackHook&&(t.stackTrace=k.Deferred.getStackHook()),C.setTimeout(t))}}return k.Deferred(function(e){o[0][3].add(l(0,e,m(r)?r:M,e.notifyWith)),o[1][3].add(l(0,e,m(t)?t:M)),o[2][3].add(l(0,e,m(n)?n:I))}).promise()},promise:function(e){return null!=e?k.extend(e,a):a}},s={};return k.each(o,function(e,t){var n=t[2],r=t[5];a[t[1]]=n.add,r&&n.add(function(){i=r},o[3-e][2].disable,o[3-e][3].disable,o[0][2].lock,o[0][3].lock),n.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=n.fireWith}),a.promise(s),e&&e.call(s,s),s},when:function(e){var n=arguments.length,t=n,r=Array(t),i=s.call(arguments),o=k.Deferred(),a=function(t){return function(e){r[t]=this,i[t]=1<arguments.length?s.call(arguments):e,--n||o.resolveWith(r,i)}};if(n<=1&&(W(e,o.done(a(t)).resolve,o.reject,!n),"pending"===o.state()||m(i[t]&&i[t].then)))return o.then();while(t--)W(i[t],a(t),o.reject);return o.promise()}});var 
$=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;k.Deferred.exceptionHook=function(e,t){C.console&&C.console.warn&&e&&$.test(e.name)&&C.console.warn("jQuery.Deferred exception: "+e.message,e.stack,t)},k.readyException=function(e){C.setTimeout(function(){throw e})};var F=k.Deferred();function B(){E.removeEventListener("DOMContentLoaded",B),C.removeEventListener("load",B),k.ready()}k.fn.ready=function(e){return F.then(e)["catch"](function(e){k.readyException(e)}),this},k.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--k.readyWait:k.isReady)||(k.isReady=!0)!==e&&0<--k.readyWait||F.resolveWith(E,[k])}}),k.ready.then=F.then,"complete"===E.readyState||"loading"!==E.readyState&&!E.documentElement.doScroll?C.setTimeout(k.ready):(E.addEventListener("DOMContentLoaded",B),C.addEventListener("load",B));var _=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===w(n))for(s in i=!0,n)_(e,t,s,n[s],!0,o,a);else if(void 0!==r&&(i=!0,m(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(k(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},z=/^-ms-/,U=/-([a-z])/g;function X(e,t){return t.toUpperCase()}function V(e){return e.replace(z,"ms-").replace(U,X)}var G=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Y(){this.expando=k.expando+Y.uid++}Y.uid=1,Y.prototype={cache:function(e){var t=e[this.expando];return t||(t={},G(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[V(t)]=n;else for(r in t)i[V(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][V(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 
0!==t){n=(t=Array.isArray(t)?t.map(V):(t=V(t))in r?[t]:t.match(R)||[]).length;while(n--)delete r[t[n]]}(void 0===t||k.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!k.isEmptyObject(t)}};var Q=new Y,J=new Y,K=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Z=/[A-Z]/g;function ee(e,t,n){var r,i;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(Z,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n="true"===(i=n)||"false"!==i&&("null"===i?null:i===+i+""?+i:K.test(i)?JSON.parse(i):i)}catch(e){}J.set(e,t,n)}else n=void 0;return n}k.extend({hasData:function(e){return J.hasData(e)||Q.hasData(e)},data:function(e,t,n){return J.access(e,t,n)},removeData:function(e,t){J.remove(e,t)},_data:function(e,t,n){return Q.access(e,t,n)},_removeData:function(e,t){Q.remove(e,t)}}),k.fn.extend({data:function(n,e){var t,r,i,o=this[0],a=o&&o.attributes;if(void 0===n){if(this.length&&(i=J.get(o),1===o.nodeType&&!Q.get(o,"hasDataAttrs"))){t=a.length;while(t--)a[t]&&0===(r=a[t].name).indexOf("data-")&&(r=V(r.slice(5)),ee(o,r,i[r]));Q.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof n?this.each(function(){J.set(this,n)}):_(this,function(e){var t;if(o&&void 0===e)return void 0!==(t=J.get(o,n))?t:void 0!==(t=ee(o,n))?t:void 0;this.each(function(){J.set(this,n,e)})},null,e,1<arguments.length,null,!0)},removeData:function(e){return this.each(function(){J.remove(this,e)})}}),k.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=Q.get(e,t),n&&(!r||Array.isArray(n)?r=Q.access(e,t,k.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=k.queue(e,t),r=n.length,i=n.shift(),o=k._queueHooks(e,t);"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,function(){k.dequeue(e,t)},o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return Q.get(e,n)||Q.access(e,n,{empty:k.Callbacks("once 
memory").add(function(){Q.remove(e,[t+"queue",n])})})}}),k.fn.extend({queue:function(t,n){var e=2;return"string"!=typeof t&&(n=t,t="fx",e--),arguments.length<e?k.queue(this[0],t):void 0===n?this:this.each(function(){var e=k.queue(this,t,n);k._queueHooks(this,t),"fx"===t&&"inprogress"!==e[0]&&k.dequeue(this,t)})},dequeue:function(e){return this.each(function(){k.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=k.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=Q.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var te=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ne=new RegExp("^(?:([+-])=|)("+te+")([a-z%]*)$","i"),re=["Top","Right","Bottom","Left"],ie=E.documentElement,oe=function(e){return k.contains(e.ownerDocument,e)},ae={composed:!0};ie.getRootNode&&(oe=function(e){return k.contains(e.ownerDocument,e)||e.getRootNode(ae)===e.ownerDocument});var se=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&oe(e)&&"none"===k.css(e,"display")},ue=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];for(o in i=n.apply(e,r||[]),t)e.style[o]=a[o];return i};function le(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return k.css(e,t,"")},u=s(),l=n&&n[3]||(k.cssNumber[t]?"":"px"),c=e.nodeType&&(k.cssNumber[t]||"px"!==l&&+u)&&ne.exec(k.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)k.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,k.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var ce={};function fe(e,t){for(var n,r,i,o,a,s,u,l=[],c=0,f=e.length;c<f;c++)(r=e[c]).style&&(n=r.style.display,t?("none"===n&&(l[c]=Q.get(r,"display")||null,l[c]||(r.style.display="")),""===r.style.display&&se(r)&&(l[c]=(u=a=o=void 
0,a=(i=r).ownerDocument,s=i.nodeName,(u=ce[s])||(o=a.body.appendChild(a.createElement(s)),u=k.css(o,"display"),o.parentNode.removeChild(o),"none"===u&&(u="block"),ce[s]=u)))):"none"!==n&&(l[c]="none",Q.set(r,"display",n)));for(c=0;c<f;c++)null!=l[c]&&(e[c].style.display=l[c]);return e}k.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){se(this)?k(this).show():k(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n<r;n++)Q.set(e[n],"globalEval",!t||Q.get(t[n],"globalEval"))}ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;var me,xe,be=/<|&#?\w+;/;function we(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===w(o))k.merge(p,o.nodeType?[o]:o);else if(be.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+k.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;k.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&-1<k.inArray(o,r))i&&i.push(o);else if(l=oe(o),a=ve(f.appendChild(o),"script"),l&&ye(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return 
f}me=E.createDocumentFragment().appendChild(E.createElement("div")),(xe=E.createElement("input")).setAttribute("type","radio"),xe.setAttribute("checked","checked"),xe.setAttribute("name","t"),me.appendChild(xe),y.checkClone=me.cloneNode(!0).cloneNode(!0).lastChild.checked,me.innerHTML="<textarea>x</textarea>",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 
0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t<arguments.length;t++)u[t]=arguments[t];if(s.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,s)){a=k.event.handlers.call(this,s,l),t=0;while((i=a[t++])&&!s.isPropagationStopped()){s.currentTarget=i.elem,n=0;while((o=i.handlers[n++])&&!s.isImmediatePropagationStopped())s.rnamespace&&!1!==o.namespace&&!s.rnamespace.test(o.namespace)||(s.handleObj=o,s.data=o.data,void 
0!==(r=((k.event.special[o.origType]||{}).handle||o.handler).apply(i.elem,u))&&!1===(s.result=r)&&(s.preventDefault(),s.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,s),s.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&1<=e.button))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?-1<k(i,this).index(l):k.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(t,e){Object.defineProperty(k.Event.prototype,t,{enumerable:!0,configurable:!0,get:m(e)?function(){if(this.originalEvent)return e(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[t]},set:function(e){Object.defineProperty(this,t,{enumerable:!0,configurable:!0,writable:!0,value:e})}})},fix:function(e){return e[k.expando]?e:new k.Event(e)},special:{load:{noBubble:!0},click:{setup:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click",ke),!1},trigger:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click"),!0},_default:function(e){var t=e.target;return pe.test(t.type)&&t.click&&A(t,"input")&&Q.get(t,"click")||A(t,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},k.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},k.Event=function(e,t){if(!(this instanceof k.Event))return new k.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 
0===e.defaultPrevented&&!1===e.returnValue?ke:Se,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&k.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[k.expando]=!0},k.Event.prototype={constructor:k.Event,isDefaultPrevented:Se,isPropagationStopped:Se,isImmediatePropagationStopped:Se,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=ke,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=ke,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var e=this.originalEvent;this.isImmediatePropagationStopped=ke,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},k.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,code:!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&Te.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Ce.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},k.event.addProp),k.each({focus:"focusin",blur:"focusout"},function(e,t){k.event.special[e]={setup:function(){return De(this,e,Ne),!1},trigger:function(){return De(this,e),!0},delegateType:t}}),k.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,i){k.event.special[e]={delegateType:i,bindType:i,handle:function(e){var t,n=e.relatedTarget,r=e.handleObj;return n&&(n===this||k.contains(this,n))||(e.type=r.origType,t=r.handler.apply(this,arguments),e.type=i),t}}}),k.fn.extend({on:function(e,t,n,r){return Ae(this,e,t,n,r)},one:function(e,t,n,r){return 
Ae(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,k(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=Se),this.each(function(){k.event.remove(this,e,n,t)})}});var je=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/<script|<style|<link/i,Le=/checked\s*(?:[^=]|=\s*.checked.)/i,He=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Oe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n<r;n++)k.event.add(t,i,l[i][n]);J.hasData(e)&&(s=J.access(e),u=k.extend({},s),J.set(t,u))}}function Ie(n,r,i,o){r=g.apply([],r);var e,t,a,s,u,l,c=0,f=n.length,p=f-1,d=r[0],h=m(d);if(h||1<f&&"string"==typeof d&&!y.checkClone&&Le.test(d))return n.each(function(e){var t=n.eq(e);h&&(r[0]=d.call(this,e,t.html())),Ie(t,r,i,o)});if(f&&(t=(e=we(r,n[0].ownerDocument,!1,n,o)).firstChild,1===e.childNodes.length&&(e=t),t||o)){for(s=(a=k.map(ve(e,"script"),Pe)).length;c<f;c++)u=e,c!==p&&(u=k.clone(u,!0,!0),s&&k.merge(a,ve(u,"script"))),i.call(n[c],u,c);if(s)for(l=a[a.length-1].ownerDocument,k.map(a,Re),c=0;c<s;c++)u=a[c],he.test(u.type||"")&&!Q.access(u,"globalEval")&&k.contains(l,u)&&(u.src&&"module"!==(u.type||"").toLowerCase()?k._evalUrl&&!u.noModule&&k._evalUrl(u.src,{nonce:u.nonce||u.getAttribute("nonce")}):b(u.textContent.replace(He,""),u,l))}return n}function We(e,t,n){for(var 
r,i=t?k.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||k.cleanData(ve(r)),r.parentNode&&(n&&oe(r)&&ye(ve(r,"script")),r.parentNode.removeChild(r));return e}k.extend({htmlPrefilter:function(e){return e.replace(je,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r<i;r++)s=o[r],u=a[r],void 0,"input"===(l=u.nodeName.toLowerCase())&&pe.test(s.type)?u.checked=s.checked:"input"!==l&&"textarea"!==l||(u.defaultValue=s.defaultValue);if(t)if(n)for(o=o||ve(e),a=a||ve(c),r=0,i=o.length;r<i;r++)Me(o[r],a[r]);else Me(e,c);return 0<(a=ve(c,"script")).length&&ye(a,!f&&ve(e,"script")),c},cleanData:function(e){for(var t,n,r,i=k.event.special,o=0;void 0!==(n=e[o]);o++)if(G(n)){if(t=n[Q.expando]){if(t.events)for(r in t.events)i[r]?k.event.remove(n,r):k.removeEvent(n,r,t.handle);n[Q.expando]=void 0}n[J.expando]&&(n[J.expando]=void 0)}}}),k.fn.extend({detach:function(e){return We(this,e,!0)},remove:function(e){return We(this,e)},text:function(e){return _(this,function(e){return void 0===e?k.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Ie(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Oe(this,e).appendChild(e)})},prepend:function(){return Ie(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Oe(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(k.cleanData(ve(e,!1)),e.textContent="");return this},clone:function(e,t){return 
e=null!=e&&e,t=null==t?e:t,this.map(function(){return k.clone(this,e,t)})},html:function(e){return _(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!qe.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=k.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(k.cleanData(ve(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var n=[];return Ie(this,arguments,function(e){var t=this.parentNode;k.inArray(this,n)<0&&(k.cleanData(ve(this)),t&&t.replaceChild(e,this))},n)}}),k.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,a){k.fn[e]=function(e){for(var t,n=[],r=k(e),i=r.length-1,o=0;o<=i;o++)t=o===i?this:this.clone(!0),k(r[o])[a](t),u.apply(n,t.get());return this.pushStack(n)}});var $e=new RegExp("^("+te+")(?!px)[a-z%]+$","i"),Fe=function(e){var t=e.ownerDocument.defaultView;return t&&t.opener||(t=C),t.getComputedStyle(e)},Be=new RegExp(re.join("|"),"i");function _e(e,t,n){var r,i,o,a,s=e.style;return(n=n||Fe(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||oe(e)||(a=k.style(e,t)),!y.pixelBoxStyles()&&$e.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function ze(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}!function(){function e(){if(u){s.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",u.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",ie.appendChild(s).appendChild(u);var 
e=C.getComputedStyle(u);n="1%"!==e.top,a=12===t(e.marginLeft),u.style.right="60%",o=36===t(e.right),r=36===t(e.width),u.style.position="absolute",i=12===t(u.offsetWidth/3),ie.removeChild(s),u=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s=E.createElement("div"),u=E.createElement("div");u.style&&(u.style.backgroundClip="content-box",u.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===u.style.backgroundClip,k.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),a},scrollboxSize:function(){return e(),i}}))}();var Ue=["Webkit","Moz","ms"],Xe=E.createElement("div").style,Ve={};function Ge(e){var t=k.cssProps[e]||Ve[e];return t||(e in Xe?e:Ve[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=Ue.length;while(n--)if((e=Ue[n]+t)in Xe)return e}(e)||e)}var Ye=/^(none|table(?!-c[ea]).+)/,Qe=/^--/,Je={position:"absolute",visibility:"hidden",display:"block"},Ke={letterSpacing:"0",fontWeight:"400"};function Ze(e,t,n){var r=ne.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function et(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=k.css(e,n+re[a],!0,i)),r?("content"===n&&(u-=k.css(e,"padding"+re[a],!0,i)),"margin"!==n&&(u-=k.css(e,"border"+re[a]+"Width",!0,i))):(u+=k.css(e,"padding"+re[a],!0,i),"padding"!==n?u+=k.css(e,"border"+re[a]+"Width",!0,i):s+=k.css(e,"border"+re[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function tt(e,t,n){var r=Fe(e),i=(!y.boxSizingReliable()||n)&&"border-box"===k.css(e,"boxSizing",!1,r),o=i,a=_e(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if($e.test(a)){if(!n)return 
a;a="auto"}return(!y.boxSizingReliable()&&i||"auto"===a||!parseFloat(a)&&"inline"===k.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===k.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+et(e,t,n||(i?"border":"content"),o,r,a)+"px"}function nt(e,t,n,r,i){return new nt.prototype.init(e,t,n,r,i)}k.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=_e(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=V(t),u=Qe.test(t),l=e.style;if(u||(t=Ge(s)),a=k.cssHooks[t]||k.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=ne.exec(n))&&i[1]&&(n=le(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(k.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=V(t);return Qe.test(t)||(t=Ge(s)),(a=k.cssHooks[t]||k.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=_e(e,t,r)),"normal"===i&&t in Ke&&(i=Ke[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),k.each(["height","width"],function(e,u){k.cssHooks[u]={get:function(e,t,n){if(t)return!Ye.test(k.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?tt(e,u,n):ue(e,Je,function(){return tt(e,u,n)})},set:function(e,t,n){var r,i=Fe(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===k.css(e,"boxSizing",!1,i),s=n?et(e,u,n,a,i):0;return 
a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-et(e,u,"border",!1,i)-.5)),s&&(r=ne.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=k.css(e,u)),Ze(0,t,s)}}}),k.cssHooks.marginLeft=ze(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(_e(e,"marginLeft"))||e.getBoundingClientRect().left-ue(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),k.each({margin:"",padding:"",border:"Width"},function(i,o){k.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+re[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(k.cssHooks[i+o].set=Ze)}),k.fn.extend({css:function(e,t){return _(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Fe(e),i=t.length;a<i;a++)o[t[a]]=k.css(e,t[a],!1,r);return o}return void 0!==n?k.style(e,t,n):k.css(e,t)},e,t,1<arguments.length)}}),((k.Tween=nt).prototype={constructor:nt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||k.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(k.cssNumber[n]?"":"px")},cur:function(){var e=nt.propHooks[this.prop];return e&&e.get?e.get(this):nt.propHooks._default.get(this)},run:function(e){var t,n=nt.propHooks[this.prop];return this.options.duration?this.pos=t=k.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):nt.propHooks._default.set(this),this}}).init.prototype=nt.prototype,(nt.propHooks={_default:{get:function(e){var t;return 
1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=k.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){k.fx.step[e.prop]?k.fx.step[e.prop](e):1!==e.elem.nodeType||!k.cssHooks[e.prop]&&null==e.elem.style[Ge(e.prop)]?e.elem[e.prop]=e.now:k.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=nt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},k.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},k.fx=nt.prototype.init,k.fx.step={};var rt,it,ot,at,st=/^(?:toggle|show|hide)$/,ut=/queueHooks$/;function lt(){it&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(lt):C.setTimeout(lt,k.fx.interval),k.fx.tick())}function ct(){return C.setTimeout(function(){rt=void 0}),rt=Date.now()}function ft(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=re[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function pt(e,t,n){for(var r,i=(dt.tweeners[t]||[]).concat(dt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function dt(o,e,t){var n,a,r=0,i=dt.prefilters.length,s=k.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var e=rt||ct(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:k.extend({},e),opts:k.extend(!0,{specialEasing:{},easing:k.easing._default},t),originalProperties:e,originalOptions:t,startTime:rt||ct(),duration:t.duration,tweens:[],createTween:function(e,t){var n=k.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var 
n,r,i,o,a;for(n in e)if(i=t[r=V(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=k.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=dt.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(k._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return k.map(c,pt,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),k.fx.timer(k.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}k.Animation=k.extend(dt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return le(n.elem,e,ne.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(R);for(var n,r=0,i=e.length;r<i;r++)n=e[r],dt.tweeners[n]=dt.tweeners[n]||[],dt.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&se(e),v=Q.get(e,"fxshow");for(r in n.queue||(null==(a=k._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,k.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],st.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||k.style(e,r)}if((u=!k.isEmptyObject(t))||!k.isEmptyObject(d))for(r in 
f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Q.get(e,"display")),"none"===(c=k.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=k.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===k.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Q.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&fe([e],!0),p.done(function(){for(r in g||fe([e]),Q.remove(e,"fxshow"),d)k.style(e,r,d[r])})),u=pt(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?dt.prefilters.unshift(e):dt.prefilters.push(e)}}),k.speed=function(e,t,n){var r=e&&"object"==typeof e?k.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return k.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in k.fx.speeds?r.duration=k.fx.speeds[r.duration]:r.duration=k.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&k.dequeue(this,r.queue)},r},k.fn.extend({fadeTo:function(e,t,n,r){return this.filter(se).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=k.isEmptyObject(t),o=k.speed(e,n,r),a=function(){var e=dt(this,k.extend({},t),o);(i||Q.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 0),e&&!1!==i&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=k.timers,r=Q.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in 
r)r[t]&&r[t].stop&&ut.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||k.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Q.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=k.timers,o=n?n.length:0;for(t.finish=!0,k.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),k.each(["toggle","show","hide"],function(e,r){var i=k.fn[r];k.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(ft(r,!0),e,t,n)}}),k.each({slideDown:ft("show"),slideUp:ft("hide"),slideToggle:ft("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){k.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),k.timers=[],k.fx.tick=function(){var e,t=0,n=k.timers;for(rt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||k.fx.stop(),rt=void 0},k.fx.timer=function(e){k.timers.push(e),k.fx.start()},k.fx.interval=13,k.fx.start=function(){it||(it=!0,lt())},k.fx.stop=function(){it=null},k.fx.speeds={slow:600,fast:200,_default:400},k.fn.delay=function(r,e){return r=k.fx&&k.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},ot=E.createElement("input"),at=E.createElement("select").appendChild(E.createElement("option")),ot.type="checkbox",y.checkOn=""!==ot.value,y.optSelected=at.selected,(ot=E.createElement("input")).value="t",ot.type="radio",y.radioValue="t"===ot.value;var ht,gt=k.expr.attrHandle;k.fn.extend({attr:function(e,t){return _(this,k.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){k.removeAttr(this,e)})}}),k.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof 
e.getAttribute?k.prop(e,t,n):(1===o&&k.isXMLDoc(e)||(i=k.attrHooks[t.toLowerCase()]||(k.expr.match.bool.test(t)?ht:void 0)),void 0!==n?null===n?void k.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=k.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(R);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),ht={set:function(e,t,n){return!1===t?k.removeAttr(e,n):e.setAttribute(n,n),n}},k.each(k.expr.match.bool.source.match(/\w+/g),function(e,t){var a=gt[t]||k.find.attr;gt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=gt[o],gt[o]=r,r=null!=a(e,t,n)?o:null,gt[o]=i),r}});var vt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;function mt(e){return(e.match(R)||[]).join(" ")}function xt(e){return e.getAttribute&&e.getAttribute("class")||""}function bt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(R)||[]}k.fn.extend({prop:function(e,t){return _(this,k.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[k.propFix[e]||e]})}}),k.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&k.isXMLDoc(e)||(t=k.propFix[t]||t,i=k.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=k.find.attr(e,"tabindex");return t?parseInt(t,10):vt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(k.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var 
t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),k.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){k.propFix[this.toLowerCase()]=this}),k.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).addClass(t.call(this,e,xt(this)))});if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).removeClass(t.call(this,e,xt(this)))});if(!arguments.length)return this.attr("class","");if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){k(this).toggleClass(i.call(this,e,xt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=k(this),r=bt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=xt(this))&&Q.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Q.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+mt(xt(n))+" ").indexOf(t))return!0;return!1}});var wt=/\r/g;k.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,k(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=k.map(t,function(e){return null==e?"":e+""})),(r=k.valHooks[this.type]||k.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 
0!==r.set(this,t,"value")||(this.value=t))})):t?(r=k.valHooks[t.type]||k.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(wt,""):null==e?"":e:void 0}}),k.extend({valHooks:{option:{get:function(e){var t=k.find.attr(e,"value");return null!=t?t:mt(k.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=k(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=k.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<k.inArray(k.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),k.each(["radio","checkbox"],function(){k.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<k.inArray(k(e).val(),t)}},y.checkOn||(k.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var Tt=/^(?:focusinfocus|focusoutblur)$/,Ct=function(e){e.stopPropagation()};k.extend(k.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!Tt.test(d+k.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[k.expando]?e:new k.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 
0,e.target||(e.target=n),t=null==t?[e]:k.makeArray(t,[e]),c=k.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,Tt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Q.get(o,"events")||{})[e.type]&&Q.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&G(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!G(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),k.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,Ct),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,Ct),k.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=k.extend(new k.Event,n,{type:e,isSimulated:!0});k.event.trigger(r,null,t)}}),k.fn.extend({trigger:function(e,t){return this.each(function(){k.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return k.event.trigger(e,t,n,!0)}}),y.focusin||k.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){k.event.simulate(r,e.target,k.event.fix(e))};k.event.special[r]={setup:function(){var e=this.ownerDocument||this,t=Q.access(e,r);t||e.addEventListener(n,i,!0),Q.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this,t=Q.access(e,r)-1;t?Q.access(e,r,t):(e.removeEventListener(n,i,!0),Q.remove(e,r))}}});var Et=C.location,kt=Date.now(),St=/\?/;k.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||k.error("Invalid XML: "+e),t};var Nt=/\[\]$/,At=/\r?\n/g,Dt=/^(?:submit|button|image|reset|file)$/i,jt=/^(?:input|select|textarea|keygen)/i;function qt(n,e,r,i){var 
t;if(Array.isArray(e))k.each(e,function(e,t){r||Nt.test(n)?i(n,t):qt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)qt(n+"["+t+"]",e[t],r,i)}k.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!k.isPlainObject(e))k.each(e,function(){i(this.name,this.value)});else for(n in e)qt(n,e[n],t,i);return r.join("&")},k.fn.extend({serialize:function(){return k.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=k.prop(this,"elements");return e?k.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!k(this).is(":disabled")&&jt.test(this.nodeName)&&!Dt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=k(this).val();return null==n?null:Array.isArray(n)?k.map(n,function(e){return{name:t.name,value:e.replace(At,"\r\n")}}):{name:t.name,value:n.replace(At,"\r\n")}}).get()}});var Lt=/%20/g,Ht=/#.*$/,Ot=/([?&])_=[^&]*/,Pt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Rt=/^(?:GET|HEAD)$/,Mt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Ft=E.createElement("a");function Bt(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(R)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function _t(t,i,o,a){var s={},u=t===Wt;function l(e){var r;return s[e]=!0,k.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function zt(e,t){var n,r,i=k.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return 
r&&k.extend(!0,e,r),e}Ft.href=Et.href,k.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Et.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Et.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":k.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,k.ajaxSettings),t):zt(k.ajaxSettings,e)},ajaxPrefilter:Bt(It),ajaxTransport:Bt(Wt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=k.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?k(y):k.event,x=k.Deferred(),b=k.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Pt.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return 
c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Et.href)+"").replace(Mt,Et.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(R)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Ft.protocol+"//"+Ft.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=k.param(v.data,v.traditional)),_t(It,v,t,T),h)return T;for(i in(g=k.event&&v.global)&&0==k.active++&&k.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Rt.test(v.type),f=v.url.replace(Ht,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(Lt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(St.test(f)?"&":"?")+v.data,delete v.data),!1===v.cache&&(f=f.replace(Ot,"$1"),o=(St.test(f)?"&":"?")+"_="+kt+++o),v.url=f+o),v.ifModified&&(k.lastModified[f]&&T.setRequestHeader("If-Modified-Since",k.lastModified[f]),k.etag[f]&&T.setRequestHeader("If-None-Match",k.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+$t+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=_t(Wt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var 
r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(k.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(k.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--k.active||k.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return k.get(e,t,n,"json")},getScript:function(e,t){return k.get(e,void 0,t,"script")}}),k.each(["get","post"],function(e,i){k[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),k.ajax(k.extend({url:e,type:i,dataType:r,data:t,success:n},k.isPlainObject(e)&&e))}}),k._evalUrl=function(e,t){return k.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text 
script":function(){}},dataFilter:function(e){k.globalEval(e,t)}})},k.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=k(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){k(this).wrapInner(n.call(this,e))}):this.each(function(){var e=k(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return this.each(function(e){k(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){k(this).replaceWith(this.childNodes)}),this}}),k.expr.pseudos.hidden=function(e){return!k.expr.pseudos.visible(e)},k.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},k.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var Ut={0:200,1223:204},Xt=k.ajaxSettings.xhr();y.cors=!!Xt&&"withCredentials"in Xt,y.ajax=Xt=!!Xt,k.ajaxTransport(function(i){var o,a;if(y.cors||Xt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(Ut[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 
0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),k.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),k.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return k.globalEval(e),e}}}),k.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),k.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=k("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof 
e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=mt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&k.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?k("<div>").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}}),k.fn.extend({hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),k.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),k.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||k.guid++,i},k.holdReady=function(e){e?k.readyWait++:k.ready(!0)},k.isArray=Array.isArray,k.parseJSON=JSON.parse,k.nodeName=A,k.isFunction=m,k.isWindow=x,k.camelCase=V,k.type=w,k.now=Date.now,k.isNumeric=function(e){var t=k.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return k});var Qt=C.jQuery,Jt=C.$;return k.noConflict=function(e){return C.$===k&&(C.$=Jt),e&&C.jQuery===k&&(C.jQuery=Qt),k},e||(C.jQuery=C.$=k),k});
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_winconsole.py
|
# -*- coding: utf-8 -*-
# This module is based on the excellent work by Adam Bartoš who
# provided a lot of what went into the implementation here in
# the discussion to issue1602 in the Python bug tracker.
#
# There are some general differences in regards to how this works
# compared to the original patches as we do not need to patch
# the entire interpreter but just work in our little world of
# echo and prompt.
import ctypes
import io
import os
import sys
import time
import zlib
from ctypes import byref
from ctypes import c_char
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_ssize_t
from ctypes import c_ulong
from ctypes import c_void_p
from ctypes import POINTER
from ctypes import py_object
from ctypes import windll
from ctypes import WinError
from ctypes import WINFUNCTYPE
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
from ctypes.wintypes import LPCWSTR
from ctypes.wintypes import LPWSTR
import msvcrt
from ._compat import _NonClosingTextIOWrapper
from ._compat import PY2
from ._compat import text_type
# ctypes-based access to the CPython buffer protocol.  On interpreters
# without ``ctypes.pythonapi`` (e.g. PyPy) this fails and ``pythonapi``
# is set to None so the rest of the module can detect the limitation.
try:
    from ctypes import pythonapi
    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
    PyBuffer_Release = pythonapi.PyBuffer_Release
except ImportError:
    pythonapi = None
c_ssize_p = POINTER(c_ssize_t)
# Win32 console API entry points resolved from kernel32/shell32.
kernel32 = windll.kernel32
GetStdHandle = kernel32.GetStdHandle
ReadConsoleW = kernel32.ReadConsoleW
WriteConsoleW = kernel32.WriteConsoleW
GetConsoleMode = kernel32.GetConsoleMode
GetLastError = kernel32.GetLastError
GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
    ("CommandLineToArgvW", windll.shell32)
)
LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(
    ("LocalFree", windll.kernel32)
)
# Standard console handles: GetStdHandle maps -10/-11/-12 to
# stdin/stdout/stderr respectively.
STDIN_HANDLE = GetStdHandle(-10)
STDOUT_HANDLE = GetStdHandle(-11)
STDERR_HANDLE = GetStdHandle(-12)
# Flag values passed to PyObject_GetBuffer.
PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1
# Win32 error codes checked after console reads/writes.
ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
# Ctrl-Z, the interactive console end-of-file marker.
EOF = b"\x1a"
# Cap on how many bytes a single console write may attempt; larger
# writes are chunked by callers.
MAX_BYTES_WRITTEN = 32767
class Py_buffer(ctypes.Structure):
    """ctypes mirror of CPython's C-level ``Py_buffer`` struct.

    Used together with ``PyObject_GetBuffer``/``PyBuffer_Release``; the
    field order and types must match the interpreter's own definition
    exactly.
    """

    _fields_ = [
        ("buf", c_void_p),
        ("obj", py_object),
        ("len", c_ssize_t),
        ("itemsize", c_ssize_t),
        ("readonly", c_int),
        ("ndim", c_int),
        ("format", c_char_p),
        ("shape", c_ssize_p),
        ("strides", c_ssize_p),
        ("suboffsets", c_ssize_p),
        ("internal", c_void_p),
    ]
    if PY2:
        # Python 2's struct carries an extra inline "smalltable" member
        # just before ``internal``.
        _fields_.insert(-1, ("smalltable", c_ssize_t * 2))
# On PyPy we cannot get buffers so our ability to operate here is
# severely limited.
if pythonapi is None:
    # No ctypes.pythonapi available: buffer access is impossible and
    # callers must check ``get_buffer is None`` before using it.
    get_buffer = None
else:
    def get_buffer(obj, writable=False):
        """Return a ``c_char`` array aliasing *obj*'s memory buffer.

        :param obj: an object exposing the buffer protocol
            (``bytes``, ``bytearray``, ...).
        :param writable: request a writable view (``PyBUF_WRITABLE``)
            instead of a read-only one.
        """
        buf = Py_buffer()
        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
        try:
            buffer_type = c_char * buf.len
            return buffer_type.from_address(buf.buf)
        finally:
            # NOTE(review): the view is released before the returned
            # array is used; this appears to rely on the caller keeping
            # ``obj`` alive so the underlying memory stays valid —
            # confirm.
            PyBuffer_Release(byref(buf))
class _WindowsConsoleRawIOBase(io.RawIOBase):
def __init__(self, handle):
self.handle = handle
def isatty(self):
io.RawIOBase.isatty(self)
return True
class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
    """Raw stream that reads UTF-16-LE bytes from a console handle."""

    def readable(self):
        return True

    def readinto(self, b):
        """Read console input into buffer *b* via ``ReadConsoleW``.

        Returns the number of bytes read — always even, two bytes per
        UTF-16 code unit — or 0 on EOF (Ctrl-Z at the start of input).
        Raises ValueError for odd-sized buffers and OSError on API
        failure.
        """
        bytes_to_be_read = len(b)
        if not bytes_to_be_read:
            return 0
        elif bytes_to_be_read % 2:
            # The console delivers whole UTF-16-LE code units, so the
            # destination must hold a whole number of 2-byte units.
            raise ValueError(
                "cannot read odd number of bytes from UTF-16-LE encoded console"
            )
        buffer = get_buffer(b, writable=True)
        code_units_to_be_read = bytes_to_be_read // 2
        code_units_read = c_ulong()
        rv = ReadConsoleW(
            HANDLE(self.handle),
            buffer,
            code_units_to_be_read,
            byref(code_units_read),
            None,
        )
        if GetLastError() == ERROR_OPERATION_ABORTED:
            # The read was interrupted (e.g. Ctrl-C); sleep briefly so
            # the pending KeyboardInterrupt can be delivered.
            # wait for KeyboardInterrupt
            time.sleep(0.1)
        if not rv:
            raise OSError("Windows error: {}".format(GetLastError()))
        if buffer[0] == EOF:
            # Ctrl-Z as the first character signals end of input.
            return 0
        return 2 * code_units_read.value
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
    """Raw stream that writes UTF-16-LE bytes to a console handle."""

    def writable(self):
        return True

    @staticmethod
    def _get_error_message(errno):
        """Map a Win32 error code to a short human-readable string."""
        if errno == ERROR_SUCCESS:
            return "ERROR_SUCCESS"
        elif errno == ERROR_NOT_ENOUGH_MEMORY:
            return "ERROR_NOT_ENOUGH_MEMORY"
        return "Windows error {}".format(errno)

    def write(self, b):
        """Write bytes *b* via ``WriteConsoleW``; return bytes written.

        At most ``MAX_BYTES_WRITTEN`` bytes are attempted per call;
        partial writes are permitted for a RawIOBase, so the buffered
        layer retries the remainder.  Raises OSError when nothing could
        be written although data was pending.
        """
        bytes_to_be_written = len(b)
        buf = get_buffer(b)
        # Two bytes per UTF-16 code unit, capped to the safe chunk size.
        code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
        code_units_written = c_ulong()
        WriteConsoleW(
            HANDLE(self.handle),
            buf,
            code_units_to_be_written,
            byref(code_units_written),
            None,
        )
        bytes_written = 2 * code_units_written.value
        if bytes_written == 0 and bytes_to_be_written > 0:
            raise OSError(self._get_error_message(GetLastError()))
        return bytes_written
class ConsoleStream(object):
    """Facade pairing a text stream with its underlying byte stream.

    Text writes go to the text stream, byte writes to ``buffer``;
    any other attribute access is delegated to the text stream.
    """

    def __init__(self, text_stream, byte_stream):
        self._text_stream = text_stream
        self.buffer = byte_stream

    @property
    def name(self):
        return self.buffer.name

    def isatty(self):
        return self.buffer.isatty()

    def write(self, x):
        if isinstance(x, text_type):
            return self._text_stream.write(x)
        # Bytes were handed to a text-level stream: flush pending text
        # first (best effort) to preserve output ordering, then write
        # straight to the byte stream.
        try:
            self.flush()
        except Exception:
            pass
        return self.buffer.write(x)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def __getattr__(self, name):
        return getattr(self._text_stream, name)

    def __repr__(self):
        return "<ConsoleStream name={!r} encoding={!r}>".format(
            self.name, self.encoding
        )
class WindowsChunkedWriter(object):
    """Transparent proxy around a stream that chunks ``write()`` calls.

    Windows binary console streams reject very large writes, so
    ``write()`` is split into pieces of at most ``MAX_BYTES_WRITTEN``;
    every other attribute is forwarded to the wrapped stream.
    """

    def __init__(self, wrapped):
        # Name-mangled on purpose so we never shadow attributes of the
        # wrapped stream object.
        self.__wrapped = wrapped

    def __getattr__(self, name):
        return getattr(self.__wrapped, name)

    def write(self, text):
        # Emit at most MAX_BYTES_WRITTEN characters per underlying call.
        for start in range(0, len(text), MAX_BYTES_WRITTEN):
            self.__wrapped.write(text[start : start + MAX_BYTES_WRITTEN])
# Names of std streams already wrapped, so a stream is never wrapped twice.
_wrapped_std_streams = set()
def _wrap_std_stream(name):
    """Wrap ``sys.<name>`` in a WindowsChunkedWriter when required.

    Only applies on Python 2 with Windows 7 or older (version <= 6.1),
    where large writes to console streams can fail.
    """
    # Python 2 & Windows 7 and below
    if (
        PY2
        and sys.getwindowsversion()[:2] <= (6, 1)
        and name not in _wrapped_std_streams
    ):
        setattr(sys, name, WindowsChunkedWriter(getattr(sys, name)))
        _wrapped_std_streams.add(name)
def _get_text_stdin(buffer_stream):
    """Build a ConsoleStream that reads UTF-16-LE text from the console."""
    raw_reader = _WindowsConsoleReader(STDIN_HANDLE)
    wrapped = _NonClosingTextIOWrapper(
        io.BufferedReader(raw_reader),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return ConsoleStream(wrapped, buffer_stream)
def _get_text_stdout(buffer_stream):
    """Build a ConsoleStream that writes UTF-16-LE text to stdout."""
    raw_writer = _WindowsConsoleWriter(STDOUT_HANDLE)
    wrapped = _NonClosingTextIOWrapper(
        io.BufferedWriter(raw_writer),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return ConsoleStream(wrapped, buffer_stream)
def _get_text_stderr(buffer_stream):
    """Build a ConsoleStream that writes UTF-16-LE text to stderr."""
    raw_writer = _WindowsConsoleWriter(STDERR_HANDLE)
    wrapped = _NonClosingTextIOWrapper(
        io.BufferedWriter(raw_writer),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return ConsoleStream(wrapped, buffer_stream)
if PY2:
    def _hash_py_argv():
        # Cheap CRC fingerprint of the argv tail, used to detect whether
        # sys.argv was modified after interpreter start.
        return zlib.crc32("\x00".join(sys.argv[1:]))
    # Snapshot taken at import time for later comparison.
    _initial_argv_hash = _hash_py_argv()
def _get_windows_argv():
    """Return sys.argv-style arguments decoded via the Windows API.

    Uses ``GetCommandLineW``/``CommandLineToArgvW`` so arguments retain
    their original unicode form (Python 2's ``sys.argv`` is bytes-only).
    Raises ``WinError`` if the command line cannot be parsed.
    """
    argc = c_int(0)
    argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
    if not argv_unicode:
        raise WinError()
    try:
        argv = [argv_unicode[i] for i in range(0, argc.value)]
    finally:
        # Free the OS-allocated argv array; the Python list above owns
        # copies of the strings.
        LocalFree(argv_unicode)
        del argv_unicode
    if not hasattr(sys, "frozen"):
        # Not a frozen executable: argv[0] is the interpreter.  Drop it
        # and skip interpreter options until the first non-option
        # argument (or until -c/-m, after which everything belongs to
        # the program).
        argv = argv[1:]
        while len(argv) > 0:
            arg = argv[0]
            if not arg.startswith("-") or arg == "-":
                break
            argv = argv[1:]
            if arg.startswith(("-c", "-m")):
                break
    return argv[1:]
# Console stream factories keyed by standard file descriptor number.
_stream_factories = {
    0: _get_text_stdin,
    1: _get_text_stdout,
    2: _get_text_stderr,
}
def _is_console(f):
if not hasattr(f, "fileno"):
return False
try:
fileno = f.fileno()
except OSError:
return False
handle = msvcrt.get_osfhandle(fileno)
return bool(GetConsoleMode(handle, byref(DWORD())))
def _get_windows_console_stream(f, encoding, errors):
    """Return a console-backed replacement stream for *f*, or None.

    Only applies when buffer access is available, the requested
    encoding/errors are compatible with the console (UTF-16-LE/strict
    or unspecified) and *f* is actually attached to a console.
    """
    if (
        get_buffer is not None
        and encoding in ("utf-16-le", None)
        and errors in ("strict", None)
        and _is_console(f)
    ):
        func = _stream_factories.get(f.fileno())
        if func is not None:
            if not PY2:
                # On Python 3 we want the underlying binary buffer; if
                # there is none we cannot provide a console stream.
                f = getattr(f, "buffer", None)
                if f is None:
                    return None
            else:
                # If we are on Python 2 we need to set the stream that we
                # deal with to binary mode as otherwise the exercise is a
                # bit moot. The same problems apply as for
                # get_binary_stdin and friends from _compat.
                msvcrt.setmode(f.fileno(), os.O_BINARY)
            return func(f)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_unicodefun.py
|
import codecs
import os
import sys
from ._compat import PY2
def _find_unicode_literals_frame():
import __future__
if not hasattr(sys, "_getframe"): # not all Python implementations have it
return 0
frm = sys._getframe(1)
idx = 1
while frm is not None:
if frm.f_globals.get("__name__", "").startswith("click."):
frm = frm.f_back
idx += 1
elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
return idx
else:
break
return 0
def _check_for_unicode_literals():
    """Emit a warning when the caller's module uses unicode_literals.

    No-op under ``-O`` (``__debug__`` false), on Python 3, or when the
    warning has been disabled via
    ``click.disable_unicode_literals_warning``.
    """
    if not __debug__:
        return
    from . import disable_unicode_literals_warning

    if not PY2 or disable_unicode_literals_warning:
        return
    frame_depth = _find_unicode_literals_frame()
    if frame_depth <= 0:
        return
    from warnings import warn

    warn(
        Warning(
            "Click detected the use of the unicode_literals __future__"
            " import. This is heavily discouraged because it can"
            " introduce subtle bugs in your code. You should instead"
            ' use explicit u"" literals for your unicode strings. For'
            " more information see"
            " https://click.palletsprojects.com/python3/"
        ),
        stacklevel=frame_depth,
    )
def _verify_python3_env():
    """Ensures that the environment is good for unicode on Python 3.

    Raises RuntimeError (with locale-specific guidance where possible)
    when Python 3 would use ASCII for the environment encoding.
    """
    if PY2:
        return
    try:
        import locale
        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
    except Exception:
        fs_enc = "ascii"
    if fs_enc != "ascii":
        # A non-ASCII preferred encoding means unicode will work fine.
        return
    extra = ""
    if os.name == "posix":
        import subprocess
        # Probe the installed locales so the error can suggest a fix.
        try:
            rv = subprocess.Popen(
                ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
            ).communicate()[0]
        except OSError:
            rv = b""
        good_locales = set()
        has_c_utf8 = False
        # Make sure we're operating on text here.
        if isinstance(rv, bytes):
            rv = rv.decode("ascii", "replace")
        for line in rv.splitlines():
            locale = line.strip()
            if locale.lower().endswith((".utf-8", ".utf8")):
                good_locales.add(locale)
                if locale.lower() in ("c.utf8", "c.utf-8"):
                    has_c_utf8 = True
        extra += "\n\n"
        if not good_locales:
            extra += (
                "Additional information: on this system no suitable"
                " UTF-8 locales were discovered. This most likely"
                " requires resolving by reconfiguring the locale"
                " system."
            )
        elif has_c_utf8:
            extra += (
                "This system supports the C.UTF-8 locale which is"
                " recommended. You might be able to resolve your issue"
                " by exporting the following environment variables:\n\n"
                " export LC_ALL=C.UTF-8\n"
                " export LANG=C.UTF-8"
            )
        else:
            extra += (
                "This system lists a couple of UTF-8 supporting locales"
                " that you can pick from. The following suitable"
                " locales were discovered: {}".format(", ".join(sorted(good_locales)))
            )
        # Check whether the user *tried* to export a UTF-8 locale that
        # the system does not actually provide.
        bad_locale = None
        for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"):
            if locale and locale.lower().endswith((".utf-8", ".utf8")):
                bad_locale = locale
            if locale is not None:
                break
        if bad_locale is not None:
            extra += (
                "\n\nClick discovered that you exported a UTF-8 locale"
                " but the locale system could not pick up from it"
                " because it does not exist. The exported locale is"
                " '{}' but it is not supported".format(bad_locale)
            )
    raise RuntimeError(
        "Click will abort further execution because Python 3 was"
        " configured to use ASCII as encoding for the environment."
        " Consult https://click.palletsprojects.com/python3/ for"
        " mitigation steps.{}".format(extra)
    )
# ---- dataset concatenation separator (was: "| 0 |" + repo path lines) ----
# next source file: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_textwrap.py
import textwrap
from contextlib import contextmanager
class TextWrapper(textwrap.TextWrapper):
    """A :class:`textwrap.TextWrapper` variant used for rendering help
    pages: over-long words are cut hard at the available width and the
    indent prefixes can be widened temporarily.
    """

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        # Always leave room for at least one character on the line.
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            # Split the next chunk: the part that fits goes onto the
            # current line, the remainder stays queued for later lines.
            head = reversed_chunks[-1][:space_left]
            tail = reversed_chunks[-1][space_left:]
            cur_line.append(head)
            reversed_chunks[-1] = tail
        elif not cur_line:
            # Breaking is disabled: place the whole chunk on an otherwise
            # empty line even though it overflows.
            cur_line.append(reversed_chunks.pop())

    @contextmanager
    def extra_indent(self, indent):
        """Temporarily append *indent* to both indent prefixes."""
        saved = self.initial_indent, self.subsequent_indent
        self.initial_indent += indent
        self.subsequent_indent += indent
        try:
            yield
        finally:
            self.initial_indent, self.subsequent_indent = saved

    def indent_only(self, text):
        """Apply the configured indents to *text* without re-wrapping."""
        prefixes = (self.initial_indent, self.subsequent_indent)
        return "\n".join(
            prefixes[min(idx, 1)] + line
            for idx, line in enumerate(text.splitlines())
        )
# ---- dataset concatenation separator (was: "| 0 |" + repo path lines) ----
# next source file: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/globals.py
from threading import local
_local = local()
def get_current_context(silent=False):
    """Return the click context at the top of the current thread's stack.

    This is the implicit counterpart to the :func:`pass_context`
    decorator and is what helpers such as :func:`echo` use to pick up
    contextual defaults.  To push the current context,
    :meth:`Context.scope` can be used.

    .. versionadded:: 5.0

    :param silent: if set to `True` the return value is `None` if no context
                   is available.  The default behavior is to raise a
                   :exc:`RuntimeError`.
    """
    stack = getattr(_local, "stack", None)
    if stack:
        return stack[-1]
    if not silent:
        raise RuntimeError("There is no active click context.")
def push_context(ctx):
    """Pushes a new context to the current stack."""
    stack = getattr(_local, "stack", None)
    if stack is None:
        # First push on this thread: create the per-thread stack lazily.
        stack = []
        _local.stack = stack
    stack.append(ctx)
def pop_context():
    """Removes the top level from the stack."""
    # Balances push_context(); raises AttributeError when nothing was
    # ever pushed on this thread, IndexError when the stack is empty.
    _local.stack.pop()
def resolve_color_default(color=None):
    """Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.

    (Fix: the docstring previously opened with four quote characters,
    leaving a stray ``"`` at the start of the rendered docstring.)

    :param color: an explicit color flag; returned as-is when not ``None``.
    :return: the explicit flag, the current context's ``color`` attribute,
             or ``None`` when neither is available.
    """
    if color is not None:
        return color

    ctx = get_current_context(silent=True)

    if ctx is not None:
        return ctx.color
# ---- dataset concatenation separator (was: "| 0 |" + repo path lines) ----
# next source file: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/__init__.py
"""
Click is a simple Python module inspired by the stdlib optparse to make
writing command line scripts fun. Unlike other modules, it's based
around a simple API that does not come with too much magic and is
composable.
"""
from .core import Argument
from .core import BaseCommand
from .core import Command
from .core import CommandCollection
from .core import Context
from .core import Group
from .core import MultiCommand
from .core import Option
from .core import Parameter
from .decorators import argument
from .decorators import command
from .decorators import confirmation_option
from .decorators import group
from .decorators import help_option
from .decorators import make_pass_decorator
from .decorators import option
from .decorators import pass_context
from .decorators import pass_obj
from .decorators import password_option
from .decorators import version_option
from .exceptions import Abort
from .exceptions import BadArgumentUsage
from .exceptions import BadOptionUsage
from .exceptions import BadParameter
from .exceptions import ClickException
from .exceptions import FileError
from .exceptions import MissingParameter
from .exceptions import NoSuchOption
from .exceptions import UsageError
from .formatting import HelpFormatter
from .formatting import wrap_text
from .globals import get_current_context
from .parser import OptionParser
from .termui import clear
from .termui import confirm
from .termui import echo_via_pager
from .termui import edit
from .termui import get_terminal_size
from .termui import getchar
from .termui import launch
from .termui import pause
from .termui import progressbar
from .termui import prompt
from .termui import secho
from .termui import style
from .termui import unstyle
from .types import BOOL
from .types import Choice
from .types import DateTime
from .types import File
from .types import FLOAT
from .types import FloatRange
from .types import INT
from .types import IntRange
from .types import ParamType
from .types import Path
from .types import STRING
from .types import Tuple
from .types import UNPROCESSED
from .types import UUID
from .utils import echo
from .utils import format_filename
from .utils import get_app_dir
from .utils import get_binary_stream
from .utils import get_os_args
from .utils import get_text_stream
from .utils import open_file
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = "7.1.2"
# ---- dataset concatenation separator (was: "| 0 |" + repo path lines) ----
# next source file: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/core.py
import errno
import inspect
import os
import sys
from contextlib import contextmanager
from functools import update_wrapper
from itertools import repeat
from ._compat import isidentifier
from ._compat import iteritems
from ._compat import PY2
from ._compat import string_types
from ._unicodefun import _check_for_unicode_literals
from ._unicodefun import _verify_python3_env
from .exceptions import Abort
from .exceptions import BadParameter
from .exceptions import ClickException
from .exceptions import Exit
from .exceptions import MissingParameter
from .exceptions import UsageError
from .formatting import HelpFormatter
from .formatting import join_options
from .globals import pop_context
from .globals import push_context
from .parser import OptionParser
from .parser import split_opt
from .termui import confirm
from .termui import prompt
from .termui import style
from .types import BOOL
from .types import convert_type
from .types import IntRange
from .utils import echo
from .utils import get_os_args
from .utils import make_default_short_help
from .utils import make_str
from .utils import PacifyFlushWrapper
_missing = object()
SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command %(name)s is deprecated."
def _maybe_show_deprecated_notice(cmd):
    """Print a red deprecation notice to stderr when *cmd* is deprecated."""
    if not cmd.deprecated:
        return
    message = DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}
    echo(style(message, fg="red"), err=True)
def fast_exit(code):
    """Terminate the process immediately, skipping garbage collection.

    This shaves roughly 10ms off exit, which matters for things like
    bash completion.  Both standard streams are flushed first since
    ``os._exit`` bypasses normal interpreter shutdown.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    os._exit(code)
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
fast_exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = (
"It is not possible to add multi commands as children to"
" another multi command that is in chain mode."
)
else:
hint = (
"Found a multi command as subcommand to a multi command"
" that is in chain mode. This is not supported."
)
raise RuntimeError(
"{}. Command '{}' is set to chain and '{}' was added as"
" subcommand but it in itself is a multi command. ('{}' is a {}"
" within a chained {} named '{}').".format(
hint,
base_command.name,
cmd_name,
cmd_name,
cmd.__class__.__name__,
base_command.__class__.__name__,
base_command.name,
)
)
def batch(iterable, batch_size):
    """Chunk *iterable* into tuples of length *batch_size*.

    A trailing remainder shorter than *batch_size* is dropped.
    """
    # zip() pulling from the same iterator N times yields N-tuples of
    # consecutive items.
    source = iter(iterable)
    return list(zip(*((source,) * batch_size)))
def invoke_param_callback(callback, ctx, param, value):
    """Invoke a parameter callback, supporting the deprecated 2-arg form.

    Modern callbacks are called as ``callback(ctx, param, value)``.  If
    the callback's code object reports fewer than three positional
    parameters, it is assumed to use the deprecated ``(ctx, value)``
    signature: a :class:`DeprecationWarning` is emitted and the callback
    is invoked without *param*.

    :param callback: the callback to invoke; its return value is returned.
    :param ctx: the current context.
    :param param: the parameter being processed.
    :param value: the parameter's value.
    """
    code = getattr(callback, "__code__", None)
    # Builtins/partials may expose no __code__; assume the modern 3-arg form.
    args = getattr(code, "co_argcount", 3)

    if args < 3:
        from warnings import warn

        # Fix: the message was previously passed through ``.format(callback)``
        # even though it contains no replacement fields -- a dead call that
        # never put the callback into the text.  Removed.
        warn(
            "Parameter callbacks take 3 args, (ctx, param, value). The"
            " 2-arg style is deprecated and will be removed in 8.0.",
            DeprecationWarning,
            stacklevel=3,
        )
        return callback(ctx, value)

    return callback(ctx, param, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
    """Context manager that attaches extra information to exceptions.

    Usage errors raised inside the block get their ``ctx`` (and, for
    :exc:`BadParameter`, their ``param``) filled in when missing, then
    are re-raised unchanged.
    """
    try:
        yield
    except BadParameter as e:
        # Handled before UsageError so the param slot can be filled too;
        # existing attributes are never overwritten.
        if e.ctx is None:
            e.ctx = ctx
        if param is not None and e.param is None:
            e.param = param
        raise
    except UsageError as e:
        if e.ctx is None:
            e.ctx = ctx
        raise
def iter_params_for_processing(invocation_order, declaration_order):
    """Sort *declaration_order* into the order parameters should be
    processed in.

    Eager parameters come first; within the same eagerness class,
    parameters present in *invocation_order* keep that order and precede
    parameters that were never invoked.
    """

    def sort_key(item):
        if item in invocation_order:
            position = invocation_order.index(item)
        else:
            # Never invoked: sort after everything that was.
            position = float("inf")
        return (not item.is_eager, position)

    return sorted(declaration_order, key=sort_key)
class Context(object):
    """The context is a special internal object that holds state relevant
    for the script execution at every single level. It's normally invisible
    to commands unless they opt-in to getting access to it.

    The context is useful as it can pass internal objects around and can
    control special execution features such as reading data from
    environment variables.

    A context can be used as context manager in which case it will call
    :meth:`close` on teardown.

    .. versionadded:: 2.0
       Added the `resilient_parsing`, `help_option_names`,
       `token_normalize_func` parameters.

    .. versionadded:: 3.0
       Added the `allow_extra_args` and `allow_interspersed_args`
       parameters.

    .. versionadded:: 4.0
       Added the `color`, `ignore_unknown_options`, and
       `max_content_width` parameters.

    .. versionadded:: 7.1
       Added the `show_default` parameter.

    :param command: the command class for this context.
    :param parent: the parent context.
    :param info_name: the info name for this invocation.  Generally this
                      is the most descriptive name for the script or
                      command.  For the toplevel script it is usually
                      the name of the script, for commands below it it's
                      the name of the script.
    :param obj: an arbitrary object of user data.
    :param auto_envvar_prefix: the prefix to use for automatic environment
                               variables.  If this is `None` then reading
                               from environment variables is disabled.  This
                               does not affect manually set environment
                               variables which are always read.
    :param default_map: a dictionary (like object) with default values
                        for parameters.
    :param terminal_width: the width of the terminal.  The default is
                           inherit from parent context.  If no context
                           defines the terminal width then auto
                           detection will be applied.
    :param max_content_width: the maximum width for content rendered by
                              Click (this currently only affects help
                              pages).  This defaults to 80 characters if
                              not overridden.  In other words: even if the
                              terminal is larger than that, Click will not
                              format things wider than 80 characters by
                              default.  In addition to that, formatters might
                              add some safety mapping on the right.
    :param resilient_parsing: if this flag is enabled then Click will
                              parse without any interactivity or callback
                              invocation.  Default values will also be
                              ignored.  This is useful for implementing
                              things such as completion support.
    :param allow_extra_args: if this is set to `True` then extra arguments
                             at the end will not raise an error and will be
                             kept on the context.  The default is to inherit
                             from the command.
    :param allow_interspersed_args: if this is set to `False` then options
                                    and arguments cannot be mixed.  The
                                    default is to inherit from the command.
    :param ignore_unknown_options: instructs click to ignore options it does
                                   not know and keeps them for later
                                   processing.
    :param help_option_names: optionally a list of strings that define how
                              the default help parameter is named.  The
                              default is ``['--help']``.
    :param token_normalize_func: an optional function that is used to
                                 normalize tokens (options, choices,
                                 etc.).  This for instance can be used to
                                 implement case insensitive behavior.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are used in texts that Click prints which is by
                  default not the case.  This for instance would affect
                  help output.
    :param show_default: if True, shows defaults for all options.
                         Even if an option is later created with show_default=False,
                         this command-level setting overrides it.
    """

    def __init__(
        self,
        command,
        parent=None,
        info_name=None,
        obj=None,
        auto_envvar_prefix=None,
        default_map=None,
        terminal_width=None,
        max_content_width=None,
        resilient_parsing=False,
        allow_extra_args=None,
        allow_interspersed_args=None,
        ignore_unknown_options=None,
        help_option_names=None,
        token_normalize_func=None,
        color=None,
        show_default=None,
    ):
        #: the parent context or `None` if none exists.
        self.parent = parent
        #: the :class:`Command` for this context.
        self.command = command
        #: the descriptive information name
        self.info_name = info_name
        #: the parsed parameters except if the value is hidden in which
        #: case it's not remembered.
        self.params = {}
        #: the leftover arguments.
        self.args = []
        #: protected arguments.  These are arguments that are prepended
        #: to `args` when certain parsing scenarios are encountered but
        #: must be never propagated to another arguments.  This is used
        #: to implement nested parsing.
        self.protected_args = []
        if obj is None and parent is not None:
            # Inherit the user object from the parent context by default.
            obj = parent.obj
        #: the user object stored.
        self.obj = obj
        self._meta = getattr(parent, "meta", {})

        #: A dictionary (-like object) with defaults for parameters.
        if (
            default_map is None
            and parent is not None
            and parent.default_map is not None
        ):
            default_map = parent.default_map.get(info_name)
        self.default_map = default_map

        #: This flag indicates if a subcommand is going to be executed.  A
        #: group callback can use this information to figure out if it's
        #: being executed directly or because the execution flow passes
        #: onwards to a subcommand.  By default it's None, but it can be
        #: the name of the subcommand to execute.
        #:
        #: If chaining is enabled this will be set to ``'*'`` in case
        #: any commands are executed.  It is however not possible to
        #: figure out which ones.  If you require this knowledge you
        #: should use a :func:`resultcallback`.
        self.invoked_subcommand = None

        if terminal_width is None and parent is not None:
            terminal_width = parent.terminal_width
        #: The width of the terminal (None is autodetection).
        self.terminal_width = terminal_width

        if max_content_width is None and parent is not None:
            max_content_width = parent.max_content_width
        #: The maximum width of formatted content (None implies a sensible
        #: default which is 80 for most things).
        self.max_content_width = max_content_width

        if allow_extra_args is None:
            allow_extra_args = command.allow_extra_args
        #: Indicates if the context allows extra args or if it should
        #: fail on parsing.
        #:
        #: .. versionadded:: 3.0
        self.allow_extra_args = allow_extra_args

        if allow_interspersed_args is None:
            allow_interspersed_args = command.allow_interspersed_args
        #: Indicates if the context allows mixing of arguments and
        #: options or not.
        #:
        #: .. versionadded:: 3.0
        self.allow_interspersed_args = allow_interspersed_args

        if ignore_unknown_options is None:
            ignore_unknown_options = command.ignore_unknown_options
        #: Instructs click to ignore options that a command does not
        #: understand and will store it on the context for later
        #: processing.  This is primarily useful for situations where you
        #: want to call into external programs.  Generally this pattern is
        #: strongly discouraged because it's not possibly to losslessly
        #: forward all arguments.
        #:
        #: .. versionadded:: 4.0
        self.ignore_unknown_options = ignore_unknown_options

        if help_option_names is None:
            if parent is not None:
                help_option_names = parent.help_option_names
            else:
                help_option_names = ["--help"]

        #: The names for the help options.
        self.help_option_names = help_option_names

        if token_normalize_func is None and parent is not None:
            token_normalize_func = parent.token_normalize_func

        #: An optional normalization function for tokens.  This is
        #: options, choices, commands etc.
        self.token_normalize_func = token_normalize_func

        #: Indicates if resilient parsing is enabled.  In that case Click
        #: will do its best to not cause any failures and default values
        #: will be ignored.  Useful for completion.
        self.resilient_parsing = resilient_parsing

        # If there is no envvar prefix yet, but the parent has one and
        # the command on this level has a name, we can expand the envvar
        # prefix automatically.
        if auto_envvar_prefix is None:
            if (
                parent is not None
                and parent.auto_envvar_prefix is not None
                and self.info_name is not None
            ):
                auto_envvar_prefix = "{}_{}".format(
                    parent.auto_envvar_prefix, self.info_name.upper()
                )
        else:
            auto_envvar_prefix = auto_envvar_prefix.upper()
        if auto_envvar_prefix is not None:
            auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
        self.auto_envvar_prefix = auto_envvar_prefix

        if color is None and parent is not None:
            color = parent.color

        #: Controls if styling output is wanted or not.
        self.color = color

        self.show_default = show_default

        self._close_callbacks = []
        self._depth = 0

    def __enter__(self):
        self._depth += 1
        push_context(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self._depth -= 1
        # Close callbacks run only when the outermost with-block exits.
        if self._depth == 0:
            self.close()
        pop_context()

    @contextmanager
    def scope(self, cleanup=True):
        """This helper method can be used with the context object to promote
        it to the current thread local (see :func:`get_current_context`).
        The default behavior of this is to invoke the cleanup functions which
        can be disabled by setting `cleanup` to `False`.  The cleanup
        functions are typically used for things such as closing file handles.

        If the cleanup is intended the context object can also be directly
        used as a context manager.

        Example usage::

            with ctx.scope():
                assert get_current_context() is ctx

        This is equivalent::

            with ctx:
                assert get_current_context() is ctx

        .. versionadded:: 5.0

        :param cleanup: controls if the cleanup functions should be run or
                        not.  The default is to run these functions.  In
                        some situations the context only wants to be
                        temporarily pushed in which case this can be disabled.
                        Nested pushes automatically defer the cleanup.
        """
        if not cleanup:
            # Pre-incrementing the depth keeps __exit__ from reaching 0,
            # so close() is skipped for this push.
            self._depth += 1
        try:
            with self as rv:
                yield rv
        finally:
            if not cleanup:
                self._depth -= 1

    @property
    def meta(self):
        """This is a dictionary which is shared with all the contexts
        that are nested.  It exists so that click utilities can store some
        state here if they need to.  It is however the responsibility of
        that code to manage this dictionary well.

        The keys are supposed to be unique dotted strings.  For instance
        module paths are a good choice for it.  What is stored in there is
        irrelevant for the operation of click.  However what is important is
        that code that places data here adheres to the general semantics of
        the system.

        Example usage::

            LANG_KEY = f'{__name__}.lang'

            def set_language(value):
                ctx = get_current_context()
                ctx.meta[LANG_KEY] = value

            def get_language():
                return get_current_context().meta.get(LANG_KEY, 'en_US')

        .. versionadded:: 5.0
        """
        return self._meta

    def make_formatter(self):
        """Creates the formatter for the help and usage output."""
        return HelpFormatter(
            width=self.terminal_width, max_width=self.max_content_width
        )

    def call_on_close(self, f):
        """This decorator remembers a function as callback that should be
        executed when the context tears down.  This is most useful to bind
        resource handling to the script execution.  For instance, file objects
        opened by the :class:`File` type will register their close callbacks
        here.

        :param f: the function to execute on teardown.
        """
        self._close_callbacks.append(f)
        return f

    def close(self):
        """Invokes all close callbacks."""
        for cb in self._close_callbacks:
            cb()
        self._close_callbacks = []

    @property
    def command_path(self):
        """The computed command path.  This is used for the ``usage``
        information on the help page.  It's automatically created by
        combining the info names of the chain of contexts to the root.
        """
        rv = ""
        if self.info_name is not None:
            rv = self.info_name
        if self.parent is not None:
            rv = "{} {}".format(self.parent.command_path, rv)
        return rv.lstrip()

    def find_root(self):
        """Finds the outermost context."""
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def find_object(self, object_type):
        """Finds the closest object of a given type."""
        node = self
        while node is not None:
            if isinstance(node.obj, object_type):
                return node.obj
            node = node.parent

    def ensure_object(self, object_type):
        """Like :meth:`find_object` but sets the innermost object to a
        new instance of `object_type` if it does not exist.
        """
        rv = self.find_object(object_type)
        if rv is None:
            self.obj = rv = object_type()
        return rv

    def lookup_default(self, name):
        """Looks up the default for a parameter name.  This by default
        looks into the :attr:`default_map` if available.
        """
        if self.default_map is not None:
            rv = self.default_map.get(name)
            if callable(rv):
                rv = rv()
            return rv

    def fail(self, message):
        """Aborts the execution of the program with a specific error
        message.

        :param message: the error message to fail with.
        """
        raise UsageError(message, self)

    def abort(self):
        """Aborts the script."""
        raise Abort()

    def exit(self, code=0):
        """Exits the application with a given exit code."""
        raise Exit(code)

    def get_usage(self):
        """Helper method to get formatted usage string for the current
        context and command.
        """
        return self.command.get_usage(self)

    def get_help(self):
        """Helper method to get formatted help page for the current
        context and command.
        """
        return self.command.get_help(self)

    def invoke(*args, **kwargs):  # noqa: B902
        """Invokes a command callback in exactly the way it expects.  There
        are two ways to invoke this method:

        1.  the first argument can be a callback and all other arguments and
            keyword arguments are forwarded directly to the function.
        2.  the first argument is a click command object.  In that case all
            arguments are forwarded as well but proper click parameters
            (options and click arguments) must be keyword arguments and Click
            will fill in defaults.

        Note that before Click 3.2 keyword arguments were not properly filled
        in against the intention of this code and no context was created.  For
        more information about this change and why it was done in a bugfix
        release see :ref:`upgrade-to-3.2`.
        """
        # self is taken from *args so a parameter literally named "self"
        # can still be forwarded through **kwargs.
        self, callback = args[:2]
        ctx = self

        # It's also possible to invoke another command which might or
        # might not have a callback.  In that case we also fill
        # in defaults and make a new context for this command.
        if isinstance(callback, Command):
            other_cmd = callback
            callback = other_cmd.callback
            ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
            if callback is None:
                raise TypeError(
                    "The given command does not have a callback that can be invoked."
                )

            for param in other_cmd.params:
                if param.name not in kwargs and param.expose_value:
                    kwargs[param.name] = param.get_default(ctx)

        args = args[2:]
        with augment_usage_errors(self):
            with ctx:
                return callback(*args, **kwargs)

    def forward(*args, **kwargs):  # noqa: B902
        """Similar to :meth:`invoke` but fills in default keyword
        arguments from the current context if the other command expects
        it.  This cannot invoke callbacks directly, only other commands.
        """
        self, cmd = args[:2]

        # It's also possible to invoke another command which might or
        # might not have a callback.
        if not isinstance(cmd, Command):
            raise TypeError("Callback is not a command.")

        for param in self.params:
            if param not in kwargs:
                kwargs[param] = self.params[param]

        return self.invoke(cmd, **kwargs)
class BaseCommand(object):
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name)
def get_usage(self, ctx):
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx):
raise NotImplementedError("Base commands cannot get help")
def make_context(self, info_name, args, parent=None, **extra):
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
:param info_name: the info name for this invokation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
the name of the script.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in iteritems(self.context_settings):
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx, args):
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError("Base commands do not know how to parse arguments.")
def invoke(self, ctx):
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError("Base commands are not invokable by default")
def main(
self,
args=None,
prog_name=None,
complete_var=None,
standalone_mode=True,
**extra
):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
# sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
_verify_python3_env()
else:
_check_for_unicode_literals()
if args is None:
args = get_os_args()
else:
args = list(args)
if prog_name is None:
prog_name = make_str(
os.path.basename(sys.argv[0] if sys.argv else __file__)
)
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except IOError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo("Aborted!", file=sys.stderr)
sys.exit(1)
def __call__(self, *args, **kwargs):
    """Alias for :meth:`main`.

    Makes the command object directly callable, which is what allows it
    to be used as a console-script entry point.
    """
    return self.main(*args, **kwargs)
class Command(BaseCommand):
    """Commands are the basic building block of command line interfaces in
    Click.  A basic command handles command line parsing and might dispatch
    more parsing to commands nested below it.

    .. versionchanged:: 2.0
        Added the `context_settings` parameter.

    .. versionchanged:: 7.1
        Added the `no_args_is_help` parameter.

    :param name: the name of the command to use unless a group overrides it.
    :param context_settings: an optional dictionary with defaults that are
                             passed to the context object.
    :param callback: the callback to invoke.  This is optional.
    :param params: the parameters to register with this command.  This can
                   be either :class:`Option` or :class:`Argument` objects.
    :param help: the help string to use for this command.
    :param epilog: like the help string but it's printed at the end of the
                   help page after everything else.
    :param short_help: the short help to use for this command.  This is
                       shown on the command listing of the parent command.
    :param add_help_option: by default each command registers a ``--help``
                            option.  This can be disabled by this parameter.
    :param no_args_is_help: this controls what happens if no arguments are
                            provided.  This option is disabled by default.
                            If enabled this will add ``--help`` as argument
                            if no arguments are passed.
    :param hidden: hide this command from help outputs.
    :param deprecated: issues a message indicating that
                       the command is deprecated.
    """

    def __init__(
        self,
        name,
        context_settings=None,
        callback=None,
        params=None,
        help=None,
        epilog=None,
        short_help=None,
        options_metavar="[OPTIONS]",
        add_help_option=True,
        no_args_is_help=False,
        hidden=False,
        deprecated=False,
    ):
        BaseCommand.__init__(self, name, context_settings)
        #: the callback to execute when the command fires.  This might be
        #: `None` in which case nothing happens.
        self.callback = callback
        #: the list of parameters for this command in the order they
        #: should show up in the help page and execute.  Eager parameters
        #: will automatically be handled before non eager ones.
        self.params = params or []
        # if a form feed (page break) is found in the help text, truncate help
        # text to the content preceding the first form feed
        if help and "\f" in help:
            help = help.split("\f", 1)[0]
        self.help = help
        self.epilog = epilog
        self.options_metavar = options_metavar
        self.short_help = short_help
        self.add_help_option = add_help_option
        self.no_args_is_help = no_args_is_help
        self.hidden = hidden
        self.deprecated = deprecated

    def get_usage(self, ctx):
        """Formats the usage line into a string and returns it.

        Calls :meth:`format_usage` internally.
        """
        formatter = ctx.make_formatter()
        self.format_usage(ctx, formatter)
        return formatter.getvalue().rstrip("\n")

    def get_params(self, ctx):
        """Return the declared parameters plus, if enabled, the implicit
        ``--help`` option (appended last so it renders at the end).
        """
        rv = self.params
        help_option = self.get_help_option(ctx)
        if help_option is not None:
            rv = rv + [help_option]
        return rv

    def format_usage(self, ctx, formatter):
        """Writes the usage line into the formatter.

        This is a low-level method called by :meth:`get_usage`.
        """
        pieces = self.collect_usage_pieces(ctx)
        formatter.write_usage(ctx.command_path, " ".join(pieces))

    def collect_usage_pieces(self, ctx):
        """Returns all the pieces that go into the usage line and returns
        it as a list of strings.
        """
        rv = [self.options_metavar]
        for param in self.get_params(ctx):
            rv.extend(param.get_usage_pieces(ctx))
        return rv

    def get_help_option_names(self, ctx):
        """Returns the names for the help option."""
        all_names = set(ctx.help_option_names)
        # Drop any names already claimed by explicitly declared parameters
        # so a user-defined ``--help`` option takes precedence.
        for param in self.params:
            all_names.difference_update(param.opts)
            all_names.difference_update(param.secondary_opts)
        return all_names

    def get_help_option(self, ctx):
        """Returns the help option object."""
        help_options = self.get_help_option_names(ctx)
        if not help_options or not self.add_help_option:
            return

        def show_help(ctx, param, value):
            # Eager callback: print help and stop processing before any
            # other (possibly required) parameters are validated.
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()

        return Option(
            help_options,
            is_flag=True,
            is_eager=True,
            expose_value=False,
            callback=show_help,
            help="Show this message and exit.",
        )

    def make_parser(self, ctx):
        """Creates the underlying option parser for this command."""
        parser = OptionParser(ctx)
        for param in self.get_params(ctx):
            param.add_to_parser(parser, ctx)
        return parser

    def get_help(self, ctx):
        """Formats the help into a string and returns it.

        Calls :meth:`format_help` internally.
        """
        formatter = ctx.make_formatter()
        self.format_help(ctx, formatter)
        return formatter.getvalue().rstrip("\n")

    def get_short_help_str(self, limit=45):
        """Gets short help for the command or makes it by shortening the
        long help string.
        """
        # ``or``/``and`` chain: prefer the explicit short help, otherwise
        # derive one from the long help; a falsy ``self.help`` falls
        # through to the empty string.
        return (
            self.short_help
            or self.help
            and make_default_short_help(self.help, limit)
            or ""
        )

    def format_help(self, ctx, formatter):
        """Writes the help into the formatter if it exists.

        This is a low-level method called by :meth:`get_help`.

        This calls the following methods:

        -   :meth:`format_usage`
        -   :meth:`format_help_text`
        -   :meth:`format_options`
        -   :meth:`format_epilog`
        """
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)

    def format_help_text(self, ctx, formatter):
        """Writes the help text to the formatter if it exists."""
        if self.help:
            formatter.write_paragraph()
            with formatter.indentation():
                help_text = self.help
                if self.deprecated:
                    help_text += DEPRECATED_HELP_NOTICE
                formatter.write_text(help_text)
        elif self.deprecated:
            # No help text at all, but still surface the deprecation notice.
            formatter.write_paragraph()
            with formatter.indentation():
                formatter.write_text(DEPRECATED_HELP_NOTICE)

    def format_options(self, ctx, formatter):
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            # ``None`` means the parameter is hidden from the help page.
            if rv is not None:
                opts.append(rv)

        if opts:
            with formatter.section("Options"):
                formatter.write_dl(opts)

    def format_epilog(self, ctx, formatter):
        """Writes the epilog into the formatter if it exists."""
        if self.epilog:
            formatter.write_paragraph()
            with formatter.indentation():
                formatter.write_text(self.epilog)

    def parse_args(self, ctx, args):
        # With ``no_args_is_help`` an empty invocation prints the help
        # page instead of failing on missing required parameters.
        if not args and self.no_args_is_help and not ctx.resilient_parsing:
            echo(ctx.get_help(), color=ctx.color)
            ctx.exit()

        parser = self.make_parser(ctx)
        opts, args, param_order = parser.parse_args(args=args)

        # Process parameters in priority order (eager parameters first)
        # rather than declaration order, so e.g. ``--help`` can act before
        # other parameters are validated.
        for param in iter_params_for_processing(param_order, self.get_params(ctx)):
            value, args = param.handle_parse_result(ctx, opts, args)

        if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
            ctx.fail(
                "Got unexpected extra argument{} ({})".format(
                    "s" if len(args) != 1 else "", " ".join(map(make_str, args))
                )
            )

        ctx.args = args
        return args

    def invoke(self, ctx):
        """Given a context, this invokes the attached callback (if it exists)
        in the right way.
        """
        _maybe_show_deprecated_notice(self)
        if self.callback is not None:
            return ctx.invoke(self.callback, **ctx.params)
class MultiCommand(Command):
    """A multi command is the basic implementation of a command that
    dispatches to subcommands.  The most common version is the
    :class:`Group`.

    :param invoke_without_command: this controls how the multi command itself
                                   is invoked.  By default it's only invoked
                                   if a subcommand is provided.
    :param no_args_is_help: this controls what happens if no arguments are
                            provided.  This option is enabled by default if
                            `invoke_without_command` is disabled or disabled
                            if it's enabled.  If enabled this will add
                            ``--help`` as argument if no arguments are
                            passed.
    :param subcommand_metavar: the string that is used in the documentation
                               to indicate the subcommand place.
    :param chain: if this is set to `True` chaining of multiple subcommands
                  is enabled.  This restricts the form of commands in that
                  they cannot have optional arguments but it allows
                  multiple commands to be chained together.
    :param result_callback: the result callback to attach to this multi
                            command.
    """

    # Leftover arguments are subcommand names plus their arguments, so
    # they must be allowed through and must not be interleaved with this
    # command's own options.
    allow_extra_args = True
    allow_interspersed_args = False

    def __init__(
        self,
        name=None,
        invoke_without_command=False,
        no_args_is_help=None,
        subcommand_metavar=None,
        chain=False,
        result_callback=None,
        **attrs
    ):
        Command.__init__(self, name, **attrs)
        # By default, show help on bare invocation only when a subcommand
        # would otherwise be required.
        if no_args_is_help is None:
            no_args_is_help = not invoke_without_command
        self.no_args_is_help = no_args_is_help
        self.invoke_without_command = invoke_without_command
        if subcommand_metavar is None:
            if chain:
                subcommand_metavar = SUBCOMMANDS_METAVAR
            else:
                subcommand_metavar = SUBCOMMAND_METAVAR
        self.subcommand_metavar = subcommand_metavar
        self.chain = chain
        #: The result callback that is stored.  This can be set or
        #: overridden with the :func:`resultcallback` decorator.
        self.result_callback = result_callback

        if self.chain:
            # In chain mode an optional argument on the group would be
            # ambiguous with the first subcommand name.
            for param in self.params:
                if isinstance(param, Argument) and not param.required:
                    raise RuntimeError(
                        "Multi commands in chain mode cannot have"
                        " optional arguments."
                    )

    def collect_usage_pieces(self, ctx):
        """Append the subcommand placeholder to the regular usage pieces."""
        rv = Command.collect_usage_pieces(self, ctx)
        rv.append(self.subcommand_metavar)
        return rv

    def format_options(self, ctx, formatter):
        Command.format_options(self, ctx, formatter)
        self.format_commands(ctx, formatter)

    def resultcallback(self, replace=False):
        """Adds a result callback to the chain command.  By default if a
        result callback is already registered this will chain them but
        this can be disabled with the `replace` parameter.  The result
        callback is invoked with the return value of the subcommand
        (or the list of return values from all subcommands if chaining
        is enabled) as well as the parameters as they would be passed
        to the main callback.

        Example::

            @click.group()
            @click.option('-i', '--input', default=23)
            def cli(input):
                return 42

            @cli.resultcallback()
            def process_result(result, input):
                return result + input

        .. versionadded:: 3.0

        :param replace: if set to `True` an already existing result
                        callback will be removed.
        """

        def decorator(f):
            old_callback = self.result_callback
            if old_callback is None or replace:
                self.result_callback = f
                return f

            # Chain the callbacks: run the old one first, feed its result
            # into the new one.
            def function(__value, *args, **kwargs):
                return f(old_callback(__value, *args, **kwargs), *args, **kwargs)

            self.result_callback = rv = update_wrapper(function, f)
            return rv

        return decorator

    def format_commands(self, ctx, formatter):
        """Extra format methods for multi methods that adds all the commands
        after the options.
        """
        commands = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command.  Ignore it
            if cmd is None:
                continue
            if cmd.hidden:
                continue

            commands.append((subcommand, cmd))

        # allow for 3 times the default spacing
        if len(commands):
            limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)

            rows = []
            for subcommand, cmd in commands:
                help = cmd.get_short_help_str(limit)
                rows.append((subcommand, help))

            if rows:
                with formatter.section("Commands"):
                    formatter.write_dl(rows)

    def parse_args(self, ctx, args):
        if not args and self.no_args_is_help and not ctx.resilient_parsing:
            echo(ctx.get_help(), color=ctx.color)
            ctx.exit()

        rest = Command.parse_args(self, ctx, args)

        # Stash the leftover arguments for subcommand dispatch in
        # ``invoke``.  ``protected_args`` survives context forwarding.
        if self.chain:
            ctx.protected_args = rest
            ctx.args = []
        elif rest:
            ctx.protected_args, ctx.args = rest[:1], rest[1:]

        return ctx.args

    def invoke(self, ctx):
        def _process_result(value):
            # Funnel all return values through the optional result callback.
            if self.result_callback is not None:
                value = ctx.invoke(self.result_callback, value, **ctx.params)
            return value

        if not ctx.protected_args:
            # If we are invoked without command the chain flag controls
            # how this happens.  If we are not in chain mode, the return
            # value here is the return value of the command.
            # If however we are in chain mode, the return value is the
            # return value of the result processor invoked with an empty
            # list (which means that no subcommand actually was executed).
            if self.invoke_without_command:
                if not self.chain:
                    return Command.invoke(self, ctx)
                with ctx:
                    Command.invoke(self, ctx)
                    return _process_result([])
            ctx.fail("Missing command.")

        # Fetch args back out
        args = ctx.protected_args + ctx.args
        ctx.args = []
        ctx.protected_args = []

        # If we're not in chain mode, we only allow the invocation of a
        # single command but we also inform the current context about the
        # name of the command to invoke.
        if not self.chain:
            # Make sure the context is entered so we do not clean up
            # resources until the result processor has worked.
            with ctx:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                ctx.invoked_subcommand = cmd_name
                Command.invoke(self, ctx)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
                with sub_ctx:
                    return _process_result(sub_ctx.command.invoke(sub_ctx))

        # In chain mode we create the contexts step by step, but after the
        # base command has been invoked.  Because at that point we do not
        # know the subcommands yet, the invoked subcommand attribute is
        # set to ``*`` to inform the command that subcommands are executed
        # but nothing else.
        with ctx:
            ctx.invoked_subcommand = "*" if args else None
            Command.invoke(self, ctx)

            # Otherwise we make every single context and invoke them in a
            # chain.  In that case the return value to the result processor
            # is the list of all invoked subcommand's results.
            contexts = []
            while args:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                sub_ctx = cmd.make_context(
                    cmd_name,
                    args,
                    parent=ctx,
                    allow_extra_args=True,
                    allow_interspersed_args=False,
                )
                contexts.append(sub_ctx)
                args, sub_ctx.args = sub_ctx.args, []

            rv = []
            for sub_ctx in contexts:
                with sub_ctx:
                    rv.append(sub_ctx.command.invoke(sub_ctx))
            return _process_result(rv)

    def resolve_command(self, ctx, args):
        cmd_name = make_str(args[0])
        original_cmd_name = cmd_name

        # Get the command
        cmd = self.get_command(ctx, cmd_name)

        # If we can't find the command but there is a normalization
        # function available, we try with that one.
        if cmd is None and ctx.token_normalize_func is not None:
            cmd_name = ctx.token_normalize_func(cmd_name)
            cmd = self.get_command(ctx, cmd_name)

        # If we don't find the command we want to show an error message
        # to the user that it was not provided.  However, there is
        # something else we should do: if the first argument looks like
        # an option we want to kick off parsing again for arguments to
        # resolve things like --help which now should go to the main
        # place.
        if cmd is None and not ctx.resilient_parsing:
            if split_opt(cmd_name)[0]:
                self.parse_args(ctx, ctx.args)
            ctx.fail("No such command '{}'.".format(original_cmd_name))

        return cmd_name, cmd, args[1:]

    def get_command(self, ctx, cmd_name):
        """Given a context and a command name, this returns a
        :class:`Command` object if it exists or returns `None`.
        """
        raise NotImplementedError()

    def list_commands(self, ctx):
        """Returns a list of subcommand names in the order they should
        appear.
        """
        return []
class Group(MultiCommand):
    """A group allows a command to have subcommands attached.  This is
    the most common way to implement nesting in Click.

    :param commands: a dictionary of commands.
    """

    def __init__(self, name=None, commands=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        # Mapping of exported subcommand name -> Command object.
        self.commands = commands if commands else {}

    def add_command(self, cmd, name=None):
        """Registers another :class:`Command` with this group.  If the name
        is not provided, the name of the command is used.
        """
        cmd_name = name or cmd.name
        if cmd_name is None:
            raise TypeError("Command has no name.")
        _check_multicommand(self, cmd_name, cmd, register=True)
        self.commands[cmd_name] = cmd

    def command(self, *args, **kwargs):
        """A shortcut decorator for declaring and attaching a command to
        the group.  This takes the same arguments as :func:`command` but
        immediately registers the created command with this instance by
        calling into :meth:`add_command`.
        """
        from .decorators import command

        def decorator(f):
            new_cmd = command(*args, **kwargs)(f)
            self.add_command(new_cmd)
            return new_cmd

        return decorator

    def group(self, *args, **kwargs):
        """A shortcut decorator for declaring and attaching a group to
        the group.  This takes the same arguments as :func:`group` but
        immediately registers the created group with this instance by
        calling into :meth:`add_command`.
        """
        from .decorators import group

        def decorator(f):
            new_group = group(*args, **kwargs)(f)
            self.add_command(new_group)
            return new_group

        return decorator

    def get_command(self, ctx, cmd_name):
        """Look up a registered subcommand by name, or ``None``."""
        return self.commands.get(cmd_name)

    def list_commands(self, ctx):
        """Return all registered subcommand names, sorted alphabetically."""
        return sorted(self.commands)
class CommandCollection(MultiCommand):
    """A command collection is a multi command that merges multiple multi
    commands together into one.  This is a straightforward implementation
    that accepts a list of different multi commands as sources and
    provides all the commands for each of them.
    """

    def __init__(self, name=None, sources=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        # Multi commands whose subcommands are exposed, in lookup order.
        self.sources = sources if sources else []

    def add_source(self, multi_cmd):
        """Adds a new multi command to the chain dispatcher."""
        self.sources.append(multi_cmd)

    def get_command(self, ctx, cmd_name):
        """Return the first match for ``cmd_name`` across the sources."""
        for src in self.sources:
            cmd = src.get_command(ctx, cmd_name)
            if cmd is None:
                continue
            if self.chain:
                _check_multicommand(self, cmd_name, cmd)
            return cmd

    def list_commands(self, ctx):
        """Return the sorted union of all command names from all sources."""
        names = set()
        for src in self.sources:
            names.update(src.list_commands(ctx))
        return sorted(names)
class Parameter(object):
    r"""A parameter to a command comes in two versions: they are either
    :class:`Option`\s or :class:`Argument`\s.  Other subclasses are currently
    not supported by design as some of the internals for parsing are
    intentionally not finalized.

    Some settings are supported by both options and arguments.

    :param param_decls: the parameter declarations for this option or
                        argument.  This is a list of flags or argument
                        names.
    :param type: the type that should be used.  Either a :class:`ParamType`
                 or a Python type.  The later is converted into the former
                 automatically if supported.
    :param required: controls if this is optional or not.
    :param default: the default value if omitted.  This can also be a callable,
                    in which case it's invoked when the default is needed
                    without any arguments.
    :param callback: a callback that should be executed after the parameter
                     was matched.  This is called as ``fn(ctx, param,
                     value)`` and needs to return the value.
    :param nargs: the number of arguments to match.  If not ``1`` the return
                  value is a tuple instead of single value.  The default for
                  nargs is ``1`` (except if the type is a tuple, then it's
                  the arity of the tuple).
    :param metavar: how the value is represented in the help page.
    :param expose_value: if this is `True` then the value is passed onwards
                         to the command callback and stored on the context,
                         otherwise it's skipped.
    :param is_eager: eager values are processed before non eager ones.  This
                     should not be set for arguments or it will inverse the
                     order of processing.
    :param envvar: a string or list of strings that are environment variables
                   that should be checked.

    .. versionchanged:: 7.1
        Empty environment variables are ignored rather than taking the
        empty string value.  This makes it possible for scripts to clear
        variables if they can't unset them.

    .. versionchanged:: 2.0
        Changed signature for parameter callback to also be passed the
        parameter.  The old callback format will still work, but it will
        raise a warning to give you a chance to migrate the code easier.
    """

    param_type_name = "parameter"

    def __init__(
        self,
        param_decls=None,
        type=None,
        required=False,
        default=None,
        callback=None,
        nargs=None,
        metavar=None,
        expose_value=True,
        is_eager=False,
        envvar=None,
        autocompletion=None,
    ):
        # ``_parse_decls`` is provided by the subclass (Option/Argument)
        # and splits the declarations into name / option strings.
        self.name, self.opts, self.secondary_opts = self._parse_decls(
            param_decls or (), expose_value
        )

        self.type = convert_type(type, default)

        # Default nargs to what the type tells us if we have that
        # information available (composite types carry an arity).
        if nargs is None:
            if self.type.is_composite:
                nargs = self.type.arity
            else:
                nargs = 1

        self.required = required
        self.callback = callback
        self.nargs = nargs
        self.multiple = False
        self.expose_value = expose_value
        self.default = default
        self.is_eager = is_eager
        self.metavar = metavar
        self.envvar = envvar
        self.autocompletion = autocompletion

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.name)

    @property
    def human_readable_name(self):
        """Returns the human readable name of this parameter.  This is the
        same as the name for options, but the metavar for arguments.
        """
        return self.name

    def make_metavar(self):
        """Return the placeholder shown for this parameter in help output."""
        if self.metavar is not None:
            return self.metavar
        metavar = self.type.get_metavar(self)
        if metavar is None:
            metavar = self.type.name.upper()
        if self.nargs != 1:
            metavar += "..."
        return metavar

    def get_default(self, ctx):
        """Given a context this calculates the default value."""
        # A callable default is evaluated lazily, each time it is needed.
        if callable(self.default):
            rv = self.default()
        else:
            rv = self.default
        return self.type_cast_value(ctx, rv)

    def add_to_parser(self, parser, ctx):
        """Hook for subclasses to register themselves with the parser."""
        pass

    def consume_value(self, ctx, opts):
        """Pick the raw value: parsed opts, then env var, then context
        defaults (e.g. ``default_map``).
        """
        value = opts.get(self.name)
        if value is None:
            value = self.value_from_envvar(ctx)
        if value is None:
            value = ctx.lookup_default(self.name)
        return value

    def type_cast_value(self, ctx, value):
        """Given a value this runs it properly through the type system.
        This automatically handles things like `nargs` and `multiple` as
        well as composite types.
        """
        if self.type.is_composite:
            if self.nargs <= 1:
                raise TypeError(
                    "Attempted to invoke composite type but nargs has"
                    " been set to {}. This is not supported; nargs"
                    " needs to be set to a fixed value > 1.".format(self.nargs)
                )
            if self.multiple:
                return tuple(self.type(x or (), self, ctx) for x in value or ())
            return self.type(value or (), self, ctx)

        def _convert(value, level):
            # Recursively unwrap nested tuples: one level for nargs != 1,
            # one more level when ``multiple`` is set.
            if level == 0:
                return self.type(value, self, ctx)
            return tuple(_convert(x, level - 1) for x in value or ())

        return _convert(value, (self.nargs != 1) + bool(self.multiple))

    def process_value(self, ctx, value):
        """Given a value and context this runs the logic to convert the
        value as necessary.
        """
        # If the value we were given is None we do nothing.  This way
        # code that calls this can easily figure out if something was
        # not provided.  Otherwise it would be converted into an empty
        # tuple for multiple invocations which is inconvenient.
        if value is not None:
            return self.type_cast_value(ctx, value)

    def value_is_missing(self, value):
        """Return True if ``value`` counts as "not provided" for this
        parameter (``None``, or an empty tuple for multi-value params).
        """
        if value is None:
            return True
        if (self.nargs != 1 or self.multiple) and value == ():
            return True
        return False

    def full_process_value(self, ctx, value):
        """Convert the raw value, fall back to the default, and enforce
        ``required``.  Raises :exc:`MissingParameter` when required and
        still missing.
        """
        value = self.process_value(ctx, value)

        if value is None and not ctx.resilient_parsing:
            value = self.get_default(ctx)

        if self.required and self.value_is_missing(value):
            raise MissingParameter(ctx=ctx, param=self)

        return value

    def resolve_envvar_value(self, ctx):
        """Return the raw string from the configured environment
        variable(s), or ``None`` if nothing usable is set.

        Empty environment variables are ignored (see the
        ``versionchanged:: 7.1`` note in the class docstring), so with a
        list of variables an empty entry does not shadow a later one.
        """
        if self.envvar is None:
            return
        if isinstance(self.envvar, (tuple, list)):
            for envvar in self.envvar:
                rv = os.environ.get(envvar)
                # Skip unset *and* empty variables so the next candidate
                # still gets a chance to supply a value.  (Previously this
                # returned the empty string, contradicting the documented
                # 7.1 behavior and the scalar branch below.)
                if rv:
                    return rv
        else:
            rv = os.environ.get(self.envvar)
            if rv != "":
                return rv

    def value_from_envvar(self, ctx):
        """Resolve and, for multi-value parameters, split the env value."""
        rv = self.resolve_envvar_value(ctx)
        if rv is not None and self.nargs != 1:
            rv = self.type.split_envvar_value(rv)
        return rv

    def handle_parse_result(self, ctx, opts, args):
        """Consume, convert and store this parameter's value during
        argument parsing.  Returns ``(value, remaining_args)``.
        """
        with augment_usage_errors(ctx, param=self):
            value = self.consume_value(ctx, opts)
            try:
                value = self.full_process_value(ctx, value)
            except Exception:
                # In resilient mode (e.g. completion) errors are swallowed
                # and the value degrades to None.
                if not ctx.resilient_parsing:
                    raise
                value = None
            if self.callback is not None:
                try:
                    value = invoke_param_callback(self.callback, ctx, self, value)
                except Exception:
                    if not ctx.resilient_parsing:
                        raise

        if self.expose_value:
            ctx.params[self.name] = value
        return value, args

    def get_help_record(self, ctx):
        """Hook for subclasses; ``None`` hides the parameter from help."""
        pass

    def get_usage_pieces(self, ctx):
        """Hook for subclasses to contribute to the usage line."""
        return []

    def get_error_hint(self, ctx):
        """Get a stringified version of the param for use in error messages to
        indicate which param caused the error.
        """
        hint_list = self.opts or [self.human_readable_name]
        return " / ".join(repr(x) for x in hint_list)
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown. If this
value is a string, it shows the string instead of the
value. This is particularly useful for dynamic options.
:param show_envvar: controls if an environment variable should be shown on
the help page. Normally, environment variables
are not shown.
:param prompt: if set to `True` or a non empty string then the user will be
prompted for input. If set to `True` the prompt will be the
option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
"""
param_type_name = "option"
def __init__(
self,
param_decls=None,
show_default=False,
prompt=False,
confirmation_prompt=False,
hide_input=False,
is_flag=None,
flag_value=None,
multiple=False,
count=False,
allow_from_autoenv=True,
type=None,
help=None,
hidden=False,
show_choices=True,
show_envvar=False,
**attrs
):
default_is_missing = attrs.get("default", _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
self.hidden = hidden
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError("Options cannot have nargs < 0")
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError("Cannot prompt for flags that are not bools.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Got secondary option for non boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError("Hidden input does not work with boolean flag prompts.")
if self.count:
if self.multiple:
raise TypeError(
"Options cannot be multiple and count at the same time."
)
elif self.is_flag:
raise TypeError(
"Options cannot be count and flags at the same time."
)
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if isidentifier(decl):
if name is not None:
raise TypeError("Name defined twice")
name = decl
else:
split_char = ";" if decl[:1] == "/" else "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not isidentifier(name):
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError("Could not determine name for option")
if not opts and not secondary_opts:
raise TypeError(
"No options defined but a name was passed ({}). Did you"
" mean to declare an argument instead of an option?".format(name)
)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {
"dest": self.name,
"nargs": self.nargs,
"obj": self,
}
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
kwargs.pop("nargs", None)
action_const = "{}_const".format(action)
if self.is_bool_flag and self.secondary_opts:
parser.add_option(self.opts, action=action_const, const=True, **kwargs)
parser.add_option(
self.secondary_opts, action=action_const, const=False, **kwargs
)
else:
parser.add_option(
self.opts, action=action_const, const=self.flag_value, **kwargs
)
else:
kwargs["action"] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
if self.hidden:
return
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += " {}".format(self.make_metavar())
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper())
if envvar is not None:
extra.append(
"env var: {}".format(
", ".join(str(d) for d in envvar)
if isinstance(envvar, (list, tuple))
else envvar
)
)
if self.default is not None and (self.show_default or ctx.show_default):
if isinstance(self.show_default, string_types):
default_string = "({})".format(self.show_default)
elif isinstance(self.default, (list, tuple)):
default_string = ", ".join(str(d) for d in self.default)
elif inspect.isfunction(self.default):
default_string = "(dynamic)"
else:
default_string = self.default
extra.append("default: {}".format(default_string))
if self.required:
extra.append("required")
if extra:
help = "{}[{}]".format(
"{} ".format(help) if help else "", "; ".join(extra)
)
return ("; " if any_prefix_is_slash else " / ").join(rv), help
def get_default(self, ctx):
# If we're a non boolean flag our default is more complex because
# we need to look at all flags in the same group to figure out
# if we're the the default one in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx)
def prompt_for_value(self, ctx):
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(
self.prompt,
default=default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
)
def resolve_envvar_value(self, ctx):
rv = Parameter.resolve_envvar_value(self, ctx)
if rv is not None:
return rv
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper())
return os.environ.get(envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0 and rv is not None:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
    def full_process_value(self, ctx, value):
        """Process the value, prompting the user when none was provided.

        Prompting is skipped during resilient parsing (e.g. shell
        completion) or when no prompt text is configured.
        """
        if value is None and self.prompt is not None and not ctx.resilient_parsing:
            return self.prompt_for_value(ctx)
        return Parameter.full_process_value(self, ctx, value)
class Argument(Parameter):
    """Arguments are positional parameters to a command. They generally
    provide fewer features than options but can have infinite ``nargs``
    and are required by default.

    All parameters are passed onwards to the parameter constructor.
    """

    param_type_name = "argument"

    def __init__(self, param_decls, required=None, **attrs):
        # Infer ``required`` when not given: an argument with a default
        # is optional; otherwise it is required unless it is variadic
        # (nargs == -1, which consumes zero or more values).
        if required is None:
            if attrs.get("default") is not None:
                required = False
            else:
                required = attrs.get("nargs", 1) > 0
        Parameter.__init__(self, param_decls, required=required, **attrs)
        if self.default is not None and self.nargs < 0:
            raise TypeError(
                "nargs=-1 in combination with a default value is not supported."
            )

    @property
    def human_readable_name(self):
        # The metavar, if given, is the user-facing name; otherwise the
        # upper-cased parameter name.
        if self.metavar is not None:
            return self.metavar
        return self.name.upper()

    def make_metavar(self):
        """Build the usage-string metavar: bracketed when optional,
        suffixed with ``...`` when variadic."""
        if self.metavar is not None:
            return self.metavar
        var = self.type.get_metavar(self)
        if not var:
            var = self.name.upper()
        if not self.required:
            var = "[{}]".format(var)
        if self.nargs != 1:
            var += "..."
        return var

    def _parse_decls(self, decls, expose_value):
        # Arguments accept exactly one declaration, which doubles as the
        # parameter name (dashes mapped to underscores, lower-cased).
        if not decls:
            if not expose_value:
                return None, [], []
            raise TypeError("Could not determine name for argument")
        if len(decls) == 1:
            name = arg = decls[0]
            name = name.replace("-", "_").lower()
        else:
            raise TypeError(
                "Arguments take exactly one parameter declaration, got"
                " {}".format(len(decls))
            )
        return name, [arg], []

    def get_usage_pieces(self, ctx):
        return [self.make_metavar()]

    def get_error_hint(self, ctx):
        return repr(self.make_metavar())

    def add_to_parser(self, parser, ctx):
        parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/types.py
|
import os
import stat
from datetime import datetime
from ._compat import _get_argv_encoding
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_streerror
from ._compat import open_stream
from ._compat import PY2
from ._compat import text_type
from .exceptions import BadParameter
from .utils import LazyFile
from .utils import safecall
class ParamType(object):
    """Helper for converting values through types. The following is
    necessary for a valid type:

    * it needs a name
    * it needs to pass through None unchanged
    * it needs to convert from a string
    * it needs to convert its result type through unchanged
      (eg: needs to be idempotent)
    * it needs to be able to deal with param and context being `None`.
      This can be the case when the object is used with prompt
      inputs.
    """

    is_composite = False
    #: the descriptive name of this type
    name = None
    #: if a list of this type is expected and the value is pulled from a
    #: string environment variable, this is what splits it up. `None`
    #: means any whitespace. For all parameters the general rule is that
    #: whitespace splits them up. The exception are paths and files which
    #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
    #: Windows).
    envvar_list_splitter = None

    def __call__(self, value, param=None, ctx=None):
        # The missing value (None) passes through without conversion,
        # returning None implicitly.
        if value is not None:
            return self.convert(value, param, ctx)

    def get_metavar(self, param):
        """Returns the metavar default for this param if it provides one."""

    def get_missing_message(self, param):
        """Optionally might return extra information about a missing
        parameter.

        .. versionadded:: 2.0
        """

    def convert(self, value, param, ctx):
        """Converts the value. This is not invoked for values that are
        `None` (the missing value). The base implementation is the
        identity function; subclasses override it.
        """
        return value

    def split_envvar_value(self, rv):
        """Given a value from an environment variable this splits it up
        into small chunks depending on the defined envvar list splitter.

        If the splitter is set to `None`, which means that whitespace splits,
        then leading and trailing whitespace is ignored. Otherwise, leading
        and trailing splitters usually lead to empty items being included.
        """
        return (rv or "").split(self.envvar_list_splitter)

    def fail(self, message, param=None, ctx=None):
        """Helper method to fail with an invalid value message."""
        raise BadParameter(message, ctx=ctx, param=param)
class CompositeParamType(ParamType):
    """Base class for parameter types whose value is assembled from
    several consecutive tokens (see :class:`Tuple`)."""

    is_composite = True

    @property
    def arity(self):
        # Number of tokens this type consumes; subclasses must define it.
        raise NotImplementedError()
class FuncParamType(ParamType):
    """Adapts an arbitrary conversion callable into a parameter type.

    The callable's ``__name__`` becomes the type name shown in help and
    error output; a ``ValueError`` from the callable is turned into a
    parameter failure.
    """

    def __init__(self, func):
        self.name = func.__name__
        self.func = func

    def convert(self, value, param, ctx):
        try:
            return self.func(value)
        except ValueError:
            # Render the offending value for the error message; the
            # inner fallback handles undecodable text (Python 2 path).
            try:
                value = text_type(value)
            except UnicodeError:
                value = str(value).decode("utf-8", "replace")
            self.fail(value, param, ctx)
class UnprocessedParamType(ParamType):
    """A type that performs no conversion at all (exposed as the
    module-level ``UNPROCESSED`` constant)."""

    name = "text"

    def convert(self, value, param, ctx):
        # Intentionally a no-op: bytes and unicode pass through untouched.
        return value

    def __repr__(self):
        return "UNPROCESSED"
class StringParamType(ParamType):
    """The implicit default type: decodes ``bytes`` to text when needed,
    passing text through unchanged."""

    name = "text"

    def convert(self, value, param, ctx):
        if isinstance(value, bytes):
            # Decode with a chain of fallbacks so conversion never
            # raises: argv encoding first, then the filesystem encoding
            # (if different), finally UTF-8 with replacement characters.
            enc = _get_argv_encoding()
            try:
                value = value.decode(enc)
            except UnicodeError:
                fs_enc = get_filesystem_encoding()
                if fs_enc != enc:
                    try:
                        value = value.decode(fs_enc)
                    except UnicodeError:
                        value = value.decode("utf-8", "replace")
                else:
                    value = value.decode("utf-8", "replace")
            return value
        return value

    def __repr__(self):
        return "STRING"
class Choice(ParamType):
    """The choice type allows a value to be checked against a fixed set
    of supported values. All of these values have to be strings.

    You should only pass a list or tuple of choices. Other iterables
    (like generators) may lead to surprising results.

    The resulting value will always be one of the originally passed choices
    regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
    being specified.

    See :ref:`choice-opts` for an example.

    :param case_sensitive: Set to false to make choices case
        insensitive. Defaults to true.
    """

    name = "choice"

    def __init__(self, choices, case_sensitive=True):
        self.choices = choices
        self.case_sensitive = case_sensitive

    def get_metavar(self, param):
        return "[{}]".format("|".join(self.choices))

    def get_missing_message(self, param):
        return "Choose from:\n\t{}.".format(",\n\t".join(self.choices))

    def convert(self, value, param, ctx):
        # Match through normalization and case sensitivity
        # first do token_normalize_func, then lowercase
        # preserve original `value` to produce an accurate message in
        # `self.fail`
        normed_value = value
        # Map (possibly normalized) spellings back to the original choice
        # so the return value is always one of ``self.choices``.
        normed_choices = {choice: choice for choice in self.choices}
        if ctx is not None and ctx.token_normalize_func is not None:
            normed_value = ctx.token_normalize_func(value)
            normed_choices = {
                ctx.token_normalize_func(normed_choice): original
                for normed_choice, original in normed_choices.items()
            }
        if not self.case_sensitive:
            # ``casefold`` does more aggressive Unicode folding than
            # ``lower`` but does not exist on Python 2.
            if PY2:
                lower = str.lower
            else:
                lower = str.casefold
            normed_value = lower(normed_value)
            normed_choices = {
                lower(normed_choice): original
                for normed_choice, original in normed_choices.items()
            }
        if normed_value in normed_choices:
            return normed_choices[normed_value]
        self.fail(
            "invalid choice: {}. (choose from {})".format(
                value, ", ".join(self.choices)
            ),
            param,
            ctx,
        )

    def __repr__(self):
        return "Choice('{}')".format(list(self.choices))
class DateTime(ParamType):
    """The DateTime type converts date strings into `datetime` objects.

    The format strings which are checked are configurable, but default to some
    common (non-timezone aware) ISO 8601 formats.

    When specifying *DateTime* formats, you should only pass a list or a tuple.
    Other iterables, like generators, may lead to surprising results.

    The format strings are processed using ``datetime.strptime``, and this
    consequently defines the format strings which are allowed.

    Parsing is tried using each format, in order, and the first format which
    parses successfully is used.

    :param formats: A list or tuple of date format strings, in the order in
                    which they should be tried. Defaults to
                    ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
                    ``'%Y-%m-%d %H:%M:%S'``.
    """

    name = "datetime"

    def __init__(self, formats=None):
        self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]

    def get_metavar(self, param):
        return "[{}]".format("|".join(self.formats))

    def _try_to_convert_date(self, value, format):
        """Return the parsed ``datetime`` or ``None`` when *format* does
        not match *value*."""
        try:
            return datetime.strptime(value, format)
        except ValueError:
            return None

    def convert(self, value, param, ctx):
        # Try each configured format in order; the first successful
        # parse wins.
        for format in self.formats:
            dtime = self._try_to_convert_date(value, format)
            if dtime:
                return dtime
        # Fix: forward ``param`` and ``ctx`` so the raised BadParameter
        # carries the failing parameter and context (previously they were
        # dropped, producing a less informative error).
        self.fail(
            "invalid datetime format: {}. (choose from {})".format(
                value, ", ".join(self.formats)
            ),
            param,
            ctx,
        )

    def __repr__(self):
        return "DateTime"
class IntParamType(ParamType):
    """Converts values to ``int``; exposed as the ``INT`` constant."""

    name = "integer"

    def convert(self, value, param, ctx):
        try:
            return int(value)
        except ValueError:
            self.fail("{} is not a valid integer".format(value), param, ctx)

    def __repr__(self):
        return "INT"
class IntRange(IntParamType):
    """A parameter that works similar to :data:`click.INT` but restricts
    the value to fit into a range. The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """

    name = "integer range"

    def __init__(self, min=None, max=None, clamp=False):
        # ``None`` for either bound means that side is unbounded.
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        rv = IntParamType.convert(self, value, param, ctx)
        # When clamping, out-of-range values snap to the nearest bound
        # instead of failing.
        if self.clamp:
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        if (
            self.min is not None
            and rv < self.min
            or self.max is not None
            and rv > self.max
        ):
            # Tailor the error message to whichever bounds actually exist.
            if self.min is None:
                self.fail(
                    "{} is bigger than the maximum valid value {}.".format(
                        rv, self.max
                    ),
                    param,
                    ctx,
                )
            elif self.max is None:
                self.fail(
                    "{} is smaller than the minimum valid value {}.".format(
                        rv, self.min
                    ),
                    param,
                    ctx,
                )
            else:
                self.fail(
                    "{} is not in the valid range of {} to {}.".format(
                        rv, self.min, self.max
                    ),
                    param,
                    ctx,
                )
        return rv

    def __repr__(self):
        return "IntRange({}, {})".format(self.min, self.max)
class FloatParamType(ParamType):
    """Converts values to ``float``; exposed as the ``FLOAT`` constant."""

    name = "float"

    def convert(self, value, param, ctx):
        try:
            return float(value)
        except ValueError:
            self.fail(
                "{} is not a valid floating point value".format(value), param, ctx
            )

    def __repr__(self):
        return "FLOAT"
class FloatRange(FloatParamType):
    """A parameter that works similar to :data:`click.FLOAT` but restricts
    the value to fit into a range. The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """

    name = "float range"

    def __init__(self, min=None, max=None, clamp=False):
        # ``None`` for either bound means that side is unbounded.
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        rv = FloatParamType.convert(self, value, param, ctx)
        # When clamping, out-of-range values snap to the nearest bound
        # instead of failing.
        if self.clamp:
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        if (
            self.min is not None
            and rv < self.min
            or self.max is not None
            and rv > self.max
        ):
            # Tailor the error message to whichever bounds actually exist.
            if self.min is None:
                self.fail(
                    "{} is bigger than the maximum valid value {}.".format(
                        rv, self.max
                    ),
                    param,
                    ctx,
                )
            elif self.max is None:
                self.fail(
                    "{} is smaller than the minimum valid value {}.".format(
                        rv, self.min
                    ),
                    param,
                    ctx,
                )
            else:
                self.fail(
                    "{} is not in the valid range of {} to {}.".format(
                        rv, self.min, self.max
                    ),
                    param,
                    ctx,
                )
        return rv

    def __repr__(self):
        return "FloatRange({}, {})".format(self.min, self.max)
class BoolParamType(ParamType):
    """Converts common textual spellings of booleans to ``bool``.

    Accepts ``true``/``t``/``1``/``yes``/``y`` and
    ``false``/``f``/``0``/``no``/``n`` in any letter case; actual
    ``bool`` values pass through unchanged.
    """

    name = "boolean"

    def convert(self, value, param, ctx):
        if isinstance(value, bool):
            return value
        normalized = value.lower()
        if normalized in ("true", "t", "1", "yes", "y"):
            return True
        if normalized in ("false", "f", "0", "no", "n"):
            return False
        self.fail("{} is not a valid boolean".format(normalized), param, ctx)

    def __repr__(self):
        return "BOOL"
class UUIDParameterType(ParamType):
    """Converts values to :class:`uuid.UUID`; exposed as the ``UUID``
    constant."""

    name = "uuid"

    def convert(self, value, param, ctx):
        import uuid

        try:
            # Python 2's uuid.UUID expects a byte string.
            if PY2 and isinstance(value, text_type):
                value = value.encode("ascii")
            return uuid.UUID(value)
        except ValueError:
            self.fail("{} is not a valid UUID value".format(value), param, ctx)

    def __repr__(self):
        return "UUID"
class File(ParamType):
    """Declares a parameter to be a file for reading or writing. The file
    is automatically closed once the context tears down (after the command
    finished working).

    Files can be opened for reading or writing. The special value ``-``
    indicates stdin or stdout depending on the mode.

    By default, the file is opened for reading text data, but it can also be
    opened in binary mode or for writing. The encoding parameter can be used
    to force a specific encoding.

    The `lazy` flag controls if the file should be opened immediately or upon
    first IO. The default is to be non-lazy for standard input and output
    streams as well as files opened for reading, `lazy` otherwise. When opening a
    file lazily for reading, it is still opened temporarily for validation, but
    will not be held open until first IO. lazy is mainly useful when opening
    for writing to avoid creating the file until it is needed.

    Starting with Click 2.0, files can also be opened atomically in which
    case all writes go into a separate file in the same folder and upon
    completion the file will be moved over to the original location. This
    is useful if a file regularly read by other users is modified.

    See :ref:`file-args` for more information.
    """

    name = "filename"
    envvar_list_splitter = os.path.pathsep

    def __init__(
        self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
    ):
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        self.lazy = lazy
        self.atomic = atomic

    def resolve_lazy_flag(self, value):
        # An explicit ``lazy`` setting wins; otherwise stdin/stdout ("-")
        # is never lazy, and write modes default to lazy so the file is
        # not created until first use.
        if self.lazy is not None:
            return self.lazy
        if value == "-":
            return False
        elif "w" in self.mode:
            return True
        return False

    def convert(self, value, param, ctx):
        try:
            # Already-open file-like objects pass through untouched.
            if hasattr(value, "read") or hasattr(value, "write"):
                return value
            lazy = self.resolve_lazy_flag(value)
            if lazy:
                f = LazyFile(
                    value, self.mode, self.encoding, self.errors, atomic=self.atomic
                )
                if ctx is not None:
                    ctx.call_on_close(f.close_intelligently)
                return f
            f, should_close = open_stream(
                value, self.mode, self.encoding, self.errors, atomic=self.atomic
            )
            # If a context is provided, we automatically close the file
            # at the end of the context execution (or flush out). If a
            # context does not exist, it's the caller's responsibility to
            # properly close the file. This for instance happens when the
            # type is used with prompts.
            if ctx is not None:
                if should_close:
                    ctx.call_on_close(safecall(f.close))
                else:
                    # Standard streams are not closed, only flushed.
                    ctx.call_on_close(safecall(f.flush))
            return f
        except (IOError, OSError) as e:  # noqa: B014
            self.fail(
                "Could not open file: {}: {}".format(
                    filename_to_ui(value), get_streerror(e)
                ),
                param,
                ctx,
            )
class Path(ParamType):
    """The path type is similar to the :class:`File` type but it performs
    different checks. First of all, instead of returning an open file
    handle it returns just the filename. Secondly, it can perform various
    basic checks about what the file or directory should be.

    .. versionchanged:: 6.0
       `allow_dash` was added.

    :param exists: if set to true, the file or directory needs to exist for
                   this value to be valid. If this is not required and a
                   file does indeed not exist, then all further checks are
                   silently skipped.
    :param file_okay: controls if a file is a possible value.
    :param dir_okay: controls if a directory is a possible value.
    :param writable: if true, a writable check is performed.
    :param readable: if true, a readable check is performed.
    :param resolve_path: if this is true, then the path is fully resolved
                         before the value is passed onwards. This means
                         that it's absolute and symlinks are resolved. It
                         will not expand a tilde-prefix, as this is
                         supposed to be done by the shell only.
    :param allow_dash: If this is set to `True`, a single dash to indicate
                       standard streams is permitted.
    :param path_type: optionally a string type that should be used to
                      represent the path. The default is `None` which
                      means the return value will be either bytes or
                      unicode depending on what makes most sense given the
                      input data Click deals with.
    """

    envvar_list_splitter = os.path.pathsep

    def __init__(
        self,
        exists=False,
        file_okay=True,
        dir_okay=True,
        writable=False,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ):
        self.exists = exists
        self.file_okay = file_okay
        self.dir_okay = dir_okay
        self.writable = writable
        self.readable = readable
        self.resolve_path = resolve_path
        self.allow_dash = allow_dash
        self.type = path_type
        # Pick a human-readable noun for error messages based on what
        # kind of path is allowed.
        if self.file_okay and not self.dir_okay:
            self.name = "file"
            self.path_type = "File"
        elif self.dir_okay and not self.file_okay:
            self.name = "directory"
            self.path_type = "Directory"
        else:
            self.name = "path"
            self.path_type = "Path"

    def coerce_path_result(self, rv):
        # Convert the result to the requested text/bytes type using the
        # filesystem encoding, when ``path_type`` was given.
        if self.type is not None and not isinstance(rv, self.type):
            if self.type is text_type:
                rv = rv.decode(get_filesystem_encoding())
            else:
                rv = rv.encode(get_filesystem_encoding())
        return rv

    def convert(self, value, param, ctx):
        rv = value
        # A lone dash stands for a standard stream; skip all filesystem
        # checks for it when permitted.
        is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
        if not is_dash:
            if self.resolve_path:
                rv = os.path.realpath(rv)
            try:
                st = os.stat(rv)
            except OSError:
                # Missing path: fine unless existence is required.
                if not self.exists:
                    return self.coerce_path_result(rv)
                self.fail(
                    "{} '{}' does not exist.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
            if not self.file_okay and stat.S_ISREG(st.st_mode):
                self.fail(
                    "{} '{}' is a file.".format(self.path_type, filename_to_ui(value)),
                    param,
                    ctx,
                )
            if not self.dir_okay and stat.S_ISDIR(st.st_mode):
                self.fail(
                    "{} '{}' is a directory.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
            if self.writable and not os.access(value, os.W_OK):
                self.fail(
                    "{} '{}' is not writable.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
            if self.readable and not os.access(value, os.R_OK):
                self.fail(
                    "{} '{}' is not readable.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
        return self.coerce_path_result(rv)
class Tuple(CompositeParamType):
    """The default behavior of Click is to apply a type on a value directly.
    This works well in most cases, except for when `nargs` is set to a fixed
    count and different types should be used for different items. In this
    case the :class:`Tuple` type can be used. This type can only be used
    if `nargs` is set to a fixed number.

    For more information see :ref:`tuple-type`.

    This can be selected by using a Python tuple literal as a type.

    :param types: a list of types that should be used for the tuple items.
    """

    def __init__(self, types):
        # Normalize each entry into a ParamType via convert_type().
        self.types = [convert_type(ty) for ty in types]

    @property
    def name(self):
        return "<{}>".format(" ".join(ty.name for ty in self.types))

    @property
    def arity(self):
        return len(self.types)

    def convert(self, value, param, ctx):
        # ``value`` must already be a sequence of exactly ``arity``
        # items; a mismatch means nargs was configured inconsistently.
        if len(value) != len(self.types):
            raise TypeError(
                "It would appear that nargs is set to conflict with the"
                " composite type arity."
            )
        return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
def convert_type(ty, default=None):
    """Converts a callable or python type into the most appropriate
    param type.

    :param ty: a python type, a ParamType instance, a tuple of types, a
               conversion callable, or ``None`` (infer from *default*).
    :param default: default value used to guess the type when *ty* is
                    ``None``.
    """
    guessed_type = False
    # No explicit type: infer it from the default value (element-wise for
    # tuple defaults).
    if ty is None and default is not None:
        if isinstance(default, tuple):
            ty = tuple(map(type, default))
        else:
            ty = type(default)
        guessed_type = True
    if isinstance(ty, tuple):
        return Tuple(ty)
    if isinstance(ty, ParamType):
        return ty
    if ty is text_type or ty is str or ty is None:
        return STRING
    if ty is int:
        return INT
    # Booleans are only okay if not guessed. This is done because for
    # flags the default value is actually a bit of a lie in that it
    # indicates which of the flags is the one we want. See get_default()
    # for more information.
    if ty is bool and not guessed_type:
        return BOOL
    if ty is float:
        return FLOAT
    if guessed_type:
        return STRING
    # Catch a common mistake
    if __debug__:
        try:
            if issubclass(ty, ParamType):
                raise AssertionError(
                    "Attempted to use an uninstantiated parameter type ({}).".format(ty)
                )
        except TypeError:
            # ``ty`` is not a class at all; fall through to wrapping it
            # as a conversion callable.
            pass
    return FuncParamType(ty)
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but internally
#: no string conversion takes place. This is necessary to achieve the
#: same bytes/unicode behavior on Python 2/3 in situations where you want
#: to not convert argument types. This is usually useful when working
#: with file paths as they can appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/formatting.py
|
from contextlib import contextmanager
from ._compat import term_len
from .parser import split_opt
from .termui import get_terminal_size
# Can force a width. This is used by the test system
FORCED_WIDTH = None
def measure_table(rows):
    """Compute the display width of each column across *rows*.

    Returns a tuple with one entry per column, each the maximum
    terminal-aware width (via ``term_len``) of that column's cells.
    """
    column_widths = {}
    for row in rows:
        for column, cell in enumerate(row):
            cell_width = term_len(cell)
            if cell_width > column_widths.get(column, 0):
                column_widths[column] = cell_width
    return tuple(column_widths[c] for c in sorted(column_widths))
def iter_rows(rows, col_count):
    """Yield each row as a tuple padded with empty strings up to
    *col_count* columns; longer rows are passed through unchanged."""
    for row in rows:
        cells = tuple(row)
        padding = ("",) * (col_count - len(cells))
        yield cells + padding
def wrap_text(
    text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False
):
    """A helper function that intelligently wraps text. By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).

    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.

    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
                           first line as a string.
    :param subsequent_indent: the indent string that should be placed on
                              each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
                                intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper

    text = text.expandtabs()
    wrapper = TextWrapper(
        width,
        initial_indent=initial_indent,
        subsequent_indent=subsequent_indent,
        replace_whitespace=False,
    )
    if not preserve_paragraphs:
        return wrapper.fill(text)
    # Paragraph mode: collect (indent, no_rewrap_flag, text) triples and
    # wrap each one separately.
    p = []
    buf = []
    indent = None

    def _flush_par():
        # Finish the paragraph currently accumulated in ``buf``.
        if not buf:
            return
        if buf[0].strip() == "\b":
            # Leading \b marks a preformatted block: keep its lines as-is.
            p.append((indent or 0, True, "\n".join(buf[1:])))
        else:
            p.append((indent or 0, False, " ".join(buf)))
        del buf[:]

    for line in text.splitlines():
        if not line:
            # Blank line terminates the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # First line of a paragraph fixes its indentation level.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()

    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(" " * indent):
            if raw:
                rv.append(wrapper.indent_only(text))
            else:
                rv.append(wrapper.fill(text))

    return "\n\n".join(rv)
class HelpFormatter(object):
    """This class helps with formatting text-based help pages. It's
    usually just needed for very special internal cases, but it's also
    exposed so that developers can write their own fancy outputs.

    At present, it always writes into memory.

    :param indent_increment: the additional increment for each level.
    :param width: the width for the text. This defaults to the terminal
                  width clamped to a maximum of 78.
    """

    def __init__(self, indent_increment=2, width=None, max_width=None):
        self.indent_increment = indent_increment
        if max_width is None:
            max_width = 80
        if width is None:
            width = FORCED_WIDTH
        if width is None:
            # Clamp to the terminal width (minus a 2-column margin),
            # never going below 50 columns.
            width = max(min(get_terminal_size()[0], max_width) - 2, 50)
        self.width = width
        self.current_indent = 0
        self.buffer = []

    def write(self, string):
        """Writes a unicode string into the internal buffer."""
        self.buffer.append(string)

    def indent(self):
        """Increases the indentation."""
        self.current_indent += self.indent_increment

    def dedent(self):
        """Decreases the indentation."""
        self.current_indent -= self.indent_increment

    def write_usage(self, prog, args="", prefix="Usage: "):
        """Writes a usage line into the buffer.

        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: the prefix for the first line.
        """
        usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent)
        text_width = self.width - self.current_indent
        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = " " * term_len(usage_prefix)
            self.write(
                wrap_text(
                    args,
                    text_width,
                    initial_indent=usage_prefix,
                    subsequent_indent=indent,
                )
            )
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write("\n")
            indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(
                wrap_text(
                    args, text_width, initial_indent=indent, subsequent_indent=indent
                )
            )
        self.write("\n")

    def write_heading(self, heading):
        """Writes a heading into the buffer."""
        self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent))

    def write_paragraph(self):
        """Writes a paragraph into the buffer."""
        if self.buffer:
            self.write("\n")

    def write_text(self, text):
        """Writes re-indented text into the buffer. This rewraps and
        preserves paragraphs.
        """
        # Keep at least 11 usable columns even under deep indentation.
        text_width = max(self.width - self.current_indent, 11)
        indent = " " * self.current_indent
        self.write(
            wrap_text(
                text,
                text_width,
                initial_indent=indent,
                subsequent_indent=indent,
                preserve_paragraphs=True,
            )
        )
        self.write("\n")

    def write_dl(self, rows, col_max=30, col_spacing=2):
        """Writes a definition list into the buffer. This is how options
        and commands are usually formatted.

        :param rows: a list of two item tuples for the terms and values.
        :param col_max: the maximum width of the first column.
        :param col_spacing: the number of spaces between the first and
                            second column.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")
        first_col = min(widths[0], col_max) + col_spacing
        for first, second in iter_rows(rows, len(widths)):
            self.write("{:>{w}}{}".format("", first, w=self.current_indent))
            if not second:
                self.write("\n")
                continue
            if term_len(first) <= first_col - col_spacing:
                self.write(" " * (first_col - term_len(first)))
            else:
                # Term is too wide: start the definition on its own line.
                self.write("\n")
                self.write(" " * (first_col + self.current_indent))
            text_width = max(self.width - first_col - 2, 10)
            wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
            lines = wrapped_text.splitlines()
            if lines:
                self.write("{}\n".format(lines[0]))
                for line in lines[1:]:
                    self.write(
                        "{:>{w}}{}\n".format(
                            "", line, w=first_col + self.current_indent
                        )
                    )
                if len(lines) > 1:
                    # separate long help from next option
                    self.write("\n")
            else:
                self.write("\n")

    @contextmanager
    def section(self, name):
        """Helpful context manager that writes a paragraph, a heading,
        and the indents.

        :param name: the section name that is written as heading.
        """
        self.write_paragraph()
        self.write_heading(name)
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    @contextmanager
    def indentation(self):
        """A context manager that increases the indentation."""
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    def getvalue(self):
        """Returns the buffer contents."""
        return "".join(self.buffer)
def join_options(options):
    """Given a list of option strings this joins them in the most appropriate
    way and returns them in the form ``(formatted_string,
    any_prefix_is_slash)`` where the second item in the tuple is a flag that
    indicates if any of the option prefixes was a slash.
    """
    decorated = []
    any_prefix_is_slash = False
    for opt in options:
        prefix = split_opt(opt)[0]
        if prefix == "/":
            any_prefix_is_slash = True
        # Sort short options ("-x") before long options ("--xyz") by
        # keying on prefix length.
        decorated.append((len(prefix), opt))
    decorated.sort(key=lambda pair: pair[0])
    formatted = ", ".join(opt for _, opt in decorated)
    return formatted, any_prefix_is_slash
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/parser.py
|
# -*- coding: utf-8 -*-
"""
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
Click uses parts of optparse written by Gregory P. Ward and maintained
by the Python Software Foundation. This is limited to code in parser.py.
Copyright 2001-2006 Gregory P. Ward. All rights reserved.
Copyright 2002-2006 Python Software Foundation. All rights reserved.
"""
import re
from collections import deque
from .exceptions import BadArgumentUsage
from .exceptions import BadOptionUsage
from .exceptions import NoSuchOption
from .exceptions import UsageError
def _unpack_args(args, nargs_spec):
    """Given an iterable of arguments and an iterable of nargs specifications,
    it returns a tuple with all the unpacked arguments at the first index
    and all remaining arguments as the second.

    The nargs specification is the number of arguments that should be consumed
    or `-1` to indicate that this position should eat up all the remainders.

    Missing items are filled with `None`.
    """
    args = deque(args)
    nargs_spec = deque(nargs_spec)
    rv = []
    # Index of the nargs=-1 "star" position, if any. Once set, fetching
    # switches to the right-hand ends so specs after the star are filled
    # from the tail of the argument list.
    spos = None

    def _fetch(c):
        try:
            if spos is None:
                return c.popleft()
            else:
                return c.pop()
        except IndexError:
            # Not enough items: fill with None.
            return None

    while nargs_spec:
        nargs = _fetch(nargs_spec)
        if nargs == 1:
            rv.append(_fetch(args))
        elif nargs > 1:
            x = [_fetch(args) for _ in range(nargs)]
            # If we're reversed, we're pulling in the arguments in reverse,
            # so we need to turn them around.
            if spos is not None:
                x.reverse()
            rv.append(tuple(x))
        elif nargs < 0:
            if spos is not None:
                raise TypeError("Cannot have two nargs < 0")
            spos = len(rv)
            # Placeholder; replaced with the remaining args below.
            rv.append(None)

    # spos is the position of the wildcard (star). If it's not `None`,
    # we fill it with the remainder.
    if spos is not None:
        rv[spos] = tuple(args)
        args = []
        # Entries after the star were collected right-to-left; restore
        # their original order.
        rv[spos + 1 :] = reversed(rv[spos + 1 :])

    return tuple(rv), list(args)
def _error_opt_args(nargs, opt):
    """Raise :exc:`BadOptionUsage` for an option missing its argument(s)."""
    if nargs == 1:
        message = "{} option requires an argument".format(opt)
    else:
        message = "{} option requires {} arguments".format(opt, nargs)
    raise BadOptionUsage(opt, message)
def split_opt(opt):
    """Split an option string into ``(prefix, name)``.

    A leading alphanumeric character means there is no prefix; a doubled
    prefix character (``--``, ``//``) forms a two-character prefix,
    otherwise the prefix is a single character.
    """
    head = opt[:1]
    if head.isalnum():
        return "", opt
    prefix_len = 2 if opt[1:2] == head else 1
    return opt[:prefix_len], opt[prefix_len:]
def normalize_opt(opt, ctx):
    """Normalize an option name through the context's
    ``token_normalize_func`` while preserving its prefix.

    A no-op when there is no context or no normalizer configured.
    """
    if ctx is None or ctx.token_normalize_func is None:
        return opt
    prefix, name = split_opt(opt)
    return prefix + ctx.token_normalize_func(name)
def split_arg_string(string):
    """Split a shell-like argument string into a list of tokens.

    Single- or double-quoted segments keep their inner whitespace and
    have backslash escapes processed; unquoted tokens split on
    whitespace.
    """
    tokens = []
    matches = re.finditer(
        r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*",
        string,
        re.S,
    )
    for match in matches:
        token = match.group().strip()
        if token[:1] == token[-1:] and token[:1] in "\"'":
            # Drop the surrounding quotes and resolve backslash escapes.
            token = (
                token[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape")
            )
        try:
            # Keep the token the same str/bytes type as the input.
            token = type(string)(token)
        except UnicodeError:
            pass
        tokens.append(token)
    return tokens
class Option(object):
    """Low-level representation of an option for the parser.

    Classifies each declaration as a short or long option and records
    the set of prefix characters for later lookup.
    """

    def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        self._short_opts = []
        self._long_opts = []
        self.prefixes = set()

        for opt in opts:
            prefix, value = split_opt(opt)
            if not prefix:
                raise ValueError("Invalid start character for option ({})".format(opt))
            self.prefixes.add(prefix[0])
            # Single prefix char + single name char => short option
            # ("-x"); everything else is treated as a long option.
            if len(prefix) == 1 and len(value) == 1:
                self._short_opts.append(opt)
            else:
                self._long_opts.append(opt)
                self.prefixes.add(prefix)

        if action is None:
            action = "store"

        self.dest = dest
        self.action = action
        self.nargs = nargs
        self.const = const
        self.obj = obj

    @property
    def takes_value(self):
        # "store" and "append" consume a value token; the const/count
        # actions do not.
        return self.action in ("store", "append")

    def process(self, value, state):
        """Apply this option's action to *value*, recording the result in
        the parsing *state*."""
        if self.action == "store":
            state.opts[self.dest] = value
        elif self.action == "store_const":
            state.opts[self.dest] = self.const
        elif self.action == "append":
            state.opts.setdefault(self.dest, []).append(value)
        elif self.action == "append_const":
            state.opts.setdefault(self.dest, []).append(self.const)
        elif self.action == "count":
            state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
        else:
            raise ValueError("unknown action '{}'".format(self.action))
        state.order.append(self.obj)
class Argument(object):
    """Internal representation of a positional argument."""

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        if self.nargs > 1:
            # For multi-value arguments, ``value`` is a tuple padded with
            # ``None`` for anything that was not supplied.
            missing = sum(1 for item in value if item is None)
            if missing == len(value):
                # Nothing was provided at all: store the argument as absent.
                value = None
            elif missing:
                # A partially filled multi-value argument is a usage error.
                raise BadArgumentUsage(
                    "argument {} takes {} values".format(self.dest, self.nargs)
                )
        state.opts[self.dest] = value
        state.order.append(self.obj)
class ParsingState(object):
    """Mutable state for one run of :meth:`OptionParser.parse_args`."""

    def __init__(self, rargs):
        # Parsed option values, leftover args and encounter order all
        # start out empty; ``rargs`` is what remains to be consumed.
        self.opts = {}
        self.largs = []
        self.order = []
        self.rargs = rargs
class OptionParser(object):
    """The option parser is an internal class that is ultimately used to
    parse options and arguments. It's modelled after optparse and brings
    a similar but vastly simplified API. It should generally not be used
    directly as the high level Click classes wrap it for you.
    It's not nearly as extensible as optparse or argparse as it does not
    implement features that are implemented on a higher level (such as
    types or defaults).
    :param ctx: optionally the :class:`~click.Context` where this parser
                should go with.
    """

    def __init__(self, ctx=None):
        #: The :class:`~click.Context` for this parser. This might be
        #: `None` for some advanced use cases.
        self.ctx = ctx
        #: This controls how the parser deals with interspersed arguments.
        #: If this is set to `False`, the parser will stop on the first
        #: non-option. Click uses this to implement nested subcommands
        #: safely.
        self.allow_interspersed_args = True
        #: This tells the parser how to deal with unknown options. By
        #: default it will error out (which is sensible), but there is a
        #: second mode where it will ignore it and continue processing
        #: after shifting all the unknown options into the resulting args.
        self.ignore_unknown_options = False
        if ctx is not None:
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        # Registered options keyed by their short/long spellings, the set
        # of known option prefixes, and positional args in declaration order.
        self._short_opt = {}
        self._long_opt = {}
        self._opt_prefixes = {"-", "--"}
        self._args = []

    def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        """Adds a new option named `dest` to the parser. The destination
        is not inferred (unlike with optparse) and needs to be explicitly
        provided. Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.
        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

    def add_argument(self, dest, nargs=1, obj=None):
        """Adds a positional argument named `dest` to the parser.
        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))

    def parse_args(self, args):
        """Parses positional arguments and returns ``(values, args, order)``
        for the parsed options and arguments as well as the leftover
        arguments if there are any. The order is a list of objects as they
        appear on the command line. If arguments appear multiple times they
        will be memorized multiple times as well.
        """
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            # In resilient mode return partial results instead of failing.
            if self.ctx is None or not self.ctx.resilient_parsing:
                raise
        return state.opts, state.largs, state.order

    def _process_args_for_args(self, state):
        # Distribute everything that is left over the declared positional
        # arguments according to their nargs specification.
        pargs, args = _unpack_args(
            state.largs + state.rargs, [x.nargs for x in self._args]
        )
        for idx, arg in enumerate(self._args):
            arg.process(pargs[idx], state)
        state.largs = args
        state.rargs = []

    def _process_args_for_options(self, state):
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            # Double dashes always handled explicitly regardless of what
            # prefixes are valid.
            if arg == "--":
                return
            elif arg[:1] in self._opt_prefixes and arglen > 1:
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                # Stop at the first non-option; push it back for the caller.
                state.rargs.insert(0, arg)
                return
        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                             ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt(self, opt, explicit_value, state):
        if opt not in self._long_opt:
            # Offer long options sharing the typed prefix as suggestions.
            possibilities = [word for word in self._long_opt if word.startswith(opt)]
            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
        option = self._long_opt[opt]
        if option.takes_value:
            # At this point it's safe to modify rargs by injecting the
            # explicit value, because no exception is raised in this
            # branch. This means that the inserted value will be fully
            # consumed.
            if explicit_value is not None:
                state.rargs.insert(0, explicit_value)
            nargs = option.nargs
            if len(state.rargs) < nargs:
                _error_opt_args(nargs, opt)
            elif nargs == 1:
                value = state.rargs.pop(0)
            else:
                value = tuple(state.rargs[:nargs])
                del state.rargs[:nargs]
        elif explicit_value is not None:
            raise BadOptionUsage(opt, "{} option does not take a value".format(opt))
        else:
            value = None
        option.process(value, state)

    def _match_short_opt(self, arg, state):
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []
        for ch in arg[1:]:
            opt = normalize_opt(prefix + ch, self.ctx)
            option = self._short_opt.get(opt)
            i += 1
            if not option:
                if self.ignore_unknown_options:
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt, ctx=self.ctx)
            if option.takes_value:
                # Any characters left in arg? Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    state.rargs.insert(0, arg[i:])
                    stop = True
                nargs = option.nargs
                if len(state.rargs) < nargs:
                    _error_opt_args(nargs, opt)
                elif nargs == 1:
                    value = state.rargs.pop(0)
                else:
                    value = tuple(state.rargs[:nargs])
                    del state.rargs[:nargs]
            else:
                value = None
            option.process(value, state)
            if stop:
                break
        # If we got any unknown options we re-combinate the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as new larg. This way there is basic combinatorics
        # that can be achieved while still ignoring unknown arguments.
        if self.ignore_unknown_options and unknown_options:
            state.largs.append("{}{}".format(prefix, "".join(unknown_options)))

    def _process_opts(self, arg, state):
        explicit_value = None
        # Long option handling happens in two parts. The first part is
        # supporting explicitly attached values. In any case, we will try
        # to long match the option first.
        if "=" in arg:
            long_opt, explicit_value = arg.split("=", 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)
        # At this point we will match the (assumed) long option through
        # the long option matching code. Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options. However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                raise
            state.largs.append(arg)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/termui.py
|
import inspect
import io
import itertools
import os
import struct
import sys
from ._compat import DEFAULT_COLUMNS
from ._compat import get_winterm_size
from ._compat import isatty
from ._compat import raw_input
from ._compat import string_types
from ._compat import strip_ansi
from ._compat import text_type
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import Path
from .utils import echo
from .utils import LazyFile
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input

# Foreground ANSI color codes by name; the matching background code is
# the foreground code plus 10 (see how ``style`` computes bg codes).
_ansi_colors = {
    "black": 30,
    "red": 31,
    "green": 32,
    "yellow": 33,
    "blue": 34,
    "magenta": 35,
    "cyan": 36,
    "white": 37,
    "reset": 39,
    "bright_black": 90,
    "bright_red": 91,
    "bright_green": 92,
    "bright_yellow": 93,
    "bright_blue": 94,
    "bright_magenta": 95,
    "bright_cyan": 96,
    "bright_white": 97,
}

# SGR sequence that resets all ANSI styling attributes.
_ansi_reset_all = "\033[0m"
def hidden_prompt_func(prompt):
    """Prompt for input without echoing it (used for hidden/password input)."""
    import getpass

    return getpass.getpass(prompt)
def _build_prompt(
text, suffix, show_default=False, default=None, show_choices=True, type=None
):
prompt = text
if type is not None and show_choices and isinstance(type, Choice):
prompt += " ({})".format(", ".join(map(str, type.choices)))
if default is not None and show_default:
prompt = "{} [{}]".format(prompt, _format_default(default))
return prompt + suffix
def _format_default(default):
    """Display file-like defaults by their name rather than their repr."""
    is_file = isinstance(default, (io.IOBase, LazyFile))
    if is_file and hasattr(default, "name"):
        return default.name
    return default
def prompt(
    text,
    default=None,
    hide_input=False,
    confirmation_prompt=False,
    type=None,
    value_proc=None,
    prompt_suffix=": ",
    show_default=True,
    err=False,
    show_choices=True,
):
    """Prompts a user for input. This is a convenience function that can
    be used to prompt a user for input later.
    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.
    .. versionadded:: 7.0
       Added the show_choices parameter.
    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.
    .. versionadded:: 4.0
       Added the `err` parameter.
    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens. If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    :param show_choices: Show or hide choices if the passed type is a Choice.
                         For example if type is a Choice of either day or week,
                         show_choices is true and text is "Group by" then the
                         prompt will be "Group by (day, week): ".
    """
    result = None

    def prompt_func(text):
        # Pick the echoing or the non-echoing input function.
        f = hidden_prompt_func if hide_input else visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f("")
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)
    prompt = _build_prompt(
        text, prompt_suffix, show_default, default, show_choices, type
    )
    # Outer loop: repeat until the value converts cleanly and, if asked
    # for, is confirmed by a second matching entry.
    while 1:
        # Inner loop: read until something non-empty is entered or the
        # default takes over.
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            elif default is not None:
                if isinstance(value_proc, Path):
                    # validate Path default value(exists, dir_okay etc.)
                    value = default
                    break
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            # Conversion failed; report and prompt again.
            echo("Error: {}".format(e.message), err=err)  # noqa: B306
            continue
        if not confirmation_prompt:
            return result
        while 1:
            value2 = prompt_func("Repeat for confirmation: ")
            if value2:
                break
        if value == value2:
            return result
        echo("Error: the two entered values do not match", err=err)
def confirm(
    text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False
):
    """Prompts for confirmation (yes/no question).
    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.
    .. versionadded:: 4.0
       Added the `err` parameter.
    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts by
                  raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # The default answer determines which letter is capitalized.
    prompt = _build_prompt(
        text, prompt_suffix, show_default, "Y/n" if default else "y/N"
    )
    while 1:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt, nl=False, err=err)
            value = visible_prompt_func("").lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        if value in ("y", "yes"):
            rv = True
        elif value in ("n", "no"):
            rv = False
        elif value == "":
            # Empty input selects the default answer.
            rv = default
        else:
            echo("Error: invalid input", err=err)
            continue
        break
    if abort and not rv:
        raise Abort()
    return rv
def get_terminal_size():
    """Returns the current size of the terminal as tuple in the form
    ``(width, height)`` in columns and rows.
    """
    # If shutil has get_terminal_size() (Python 3.3 and later) use that
    if sys.version_info >= (3, 3):
        import shutil

        shutil_get_terminal_size = getattr(shutil, "get_terminal_size", None)
        if shutil_get_terminal_size:
            sz = shutil_get_terminal_size()
            return sz.columns, sz.lines
    # We provide a sensible default for get_winterm_size() when being invoked
    # inside a subprocess. Without this, it would not provide a useful input.
    if get_winterm_size is not None:
        size = get_winterm_size()
        if size == (0, 0):
            return (79, 24)
        else:
            return size

    def ioctl_gwinsz(fd):
        # Query the kernel for the window size of the given descriptor;
        # returns (rows, cols) or None if the ioctl fails.
        try:
            import fcntl
            import termios

            cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
        except Exception:
            return
        return cr

    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not cr:
        # None of stdin/stdout/stderr worked; try the controlling terminal.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_gwinsz(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not cr or not cr[0] or not cr[1]:
        # Final fallback: the environment, then a hard-coded default.
        cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS))
    # cr is (rows, columns); the public contract is (width, height).
    return int(cr[1]), int(cr[0])
def echo_via_pager(text_or_generator, color=None):
    """This function takes a text and shows it via an environment specific
    pager on stdout.
    .. versionchanged:: 3.0
       Added the `color` flag.
    :param text_or_generator: the text to page, or alternatively, a
                              generator emitting the text to page.
    :param color: controls if the pager supports ANSI colors or not. The
                  default is autodetection.
    """
    color = resolve_color_default(color)
    if inspect.isgeneratorfunction(text_or_generator):
        # An un-called generator function was passed; call it ourselves.
        i = text_or_generator()
    elif isinstance(text_or_generator, string_types):
        i = [text_or_generator]
    else:
        i = iter(text_or_generator)
    # convert every element of i to a text type if necessary
    text_generator = (el if isinstance(el, string_types) else text_type(el) for el in i)
    from ._termui_impl import pager

    # chain(..., "\n") appends a single trailing newline: iterating the
    # one-character string "\n" yields exactly one "\n" element.
    return pager(itertools.chain(text_generator, "\n"), color)
def progressbar(
    iterable=None,
    length=None,
    label=None,
    show_eta=True,
    show_percent=None,
    show_pos=False,
    item_show_func=None,
    fill_char="#",
    empty_char="-",
    bar_template="%(label)s [%(bar)s] %(info)s",
    info_sep=" ",
    width=36,
    file=None,
    color=None,
):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar. It will
    either iterate over the `iterable` or `length` items (that are counted
    up). While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more. By default, this progress bar
    will not be rendered if the file is not a terminal.
    The context manager creates the progress bar. When the context
    manager is entered the progress bar is already created. With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated. When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.
    Note: The progress bar is currently designed for use cases where the
    total progress can be expected to take at least several seconds.
    Because of this, the ProgressBar class object won't display
    progress that is considered too fast, and progress where the time
    between steps is less than a second.
    No printing must happen or the progress bar will be unintentionally
    destroyed.
    Example usage::
        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)
    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar. The update method accepts the number
    of steps to increment the bar with::
        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)
    .. versionadded:: 2.0
    .. versionadded:: 4.0
       Added the `color` parameter. Added a `update` method to the
       progressbar object.
    :param iterable: an iterable to iterate over. If not provided the length
                     is required.
    :param length: the number of items to iterate over. By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work. If an iterable is
                   also provided this parameter can be used to override the
                   length. If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display. This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display. The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display. The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar. Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to. If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not. The
                  default is autodetection. This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    # The actual rendering logic lives in _termui_impl.ProgressBar; this
    # function only resolves the color default and forwards the options.
    from ._termui_impl import ProgressBar

    color = resolve_color_default(color)
    return ProgressBar(
        iterable=iterable,
        length=length,
        show_eta=show_eta,
        show_percent=show_percent,
        show_pos=show_pos,
        item_show_func=item_show_func,
        fill_char=fill_char,
        empty_char=empty_char,
        bar_template=bar_template,
        info_sep=info_sep,
        file=file,
        label=label,
        width=width,
        color=color,
    )
def clear():
    """Clears the terminal screen. This will have the effect of clearing
    the whole visible space of the terminal and moving the cursor to the
    top left. This does not do anything if not connected to a terminal.
    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return
    # If we're on Windows and we don't have colorama available, then we
    # clear the screen by shelling out. Otherwise we can use an escape
    # sequence.
    if WIN:
        os.system("cls")
    else:
        # ESC[2J erases the whole display; ESC[1;1H moves the cursor to
        # row 1, column 1 (top left).
        sys.stdout.write("\033[2J\033[1;1H")
def style(
    text,
    fg=None,
    bg=None,
    bold=None,
    dim=None,
    underline=None,
    blink=None,
    reverse=None,
    reset=True,
):
    """Styles a text with ANSI styles and returns the new string.

    By default the styling is self contained: a reset-all code is
    appended so the styles do not carry over into subsequent output.
    Pass ``reset=False`` to compose styles across several calls.

    Supported color names are the keys of ``_ansi_colors``: ``black``,
    ``red``, ``green``, ``yellow``, ``blue``, ``magenta``, ``cyan``,
    ``white``, their ``bright_`` variants (7.0+) and ``reset`` (resets
    the color code only). Some may render differently per terminal
    (e.g. ``black`` as gray, ``yellow`` as orange).

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: enable or disable bold mode (``None`` leaves it alone).
    :param dim: enable or disable dim mode (badly supported).
    :param underline: enable or disable underline.
    :param blink: enable or disable blinking.
    :param reverse: enable or disable inverse rendering (foreground
                    becomes background and the other way round).
    :param reset: append a reset-all code at the end (default ``True``).
    :raises TypeError: if an unknown color name is given.
    """
    bits = []
    if fg:
        try:
            bits.append("\033[{}m".format(_ansi_colors[fg]))
        except KeyError:
            raise TypeError("Unknown color '{}'".format(fg))
    if bg:
        try:
            # Background codes are the foreground codes shifted by 10.
            bits.append("\033[{}m".format(_ansi_colors[bg] + 10))
        except KeyError:
            raise TypeError("Unknown color '{}'".format(bg))
    # Tri-state attributes: None is untouched, True emits the "on" SGR
    # code, False the matching "off" code.
    for attr, on_code, off_code in (
        (bold, 1, 22),
        (dim, 2, 22),
        (underline, 4, 24),
        (blink, 5, 25),
        (reverse, 7, 27),
    ):
        if attr is not None:
            bits.append("\033[{}m".format(on_code if attr else off_code))
    bits.append(text)
    if reset:
        bits.append(_ansi_reset_all)
    return "".join(bits)
def unstyle(text):
    """Removes ANSI styling information from a string. Usually it's not
    necessary to use this function as Click's echo function will
    automatically remove styling if necessary.
    .. versionadded:: 2.0
    :param text: the text to remove style information from.
    """
    # Delegates to click._compat.strip_ansi (imported at module top).
    return strip_ansi(text)
def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
    """Combine :func:`echo` and :func:`style` into a single call.

    ``click.secho('Hello World!', fg='green')`` is equivalent to
    ``click.echo(click.style('Hello World!', fg='green'))``. All extra
    keyword arguments are forwarded to :func:`style`; the named ones go
    to :func:`echo`.
    .. versionadded:: 2.0
    """
    styled = message if message is None else style(message, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
def edit(
    text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None
):
    r"""Edits the given text in the defined editor.

    If an editor is given (should be the full path to the executable but
    the regular operating system search path is used for finding the
    executable) it overrides the detected editor. Optionally, some
    environment variables can be used. If the editor is closed without
    changes, `None` is returned. In case a file is edited directly the
    return value is always `None` and `require_save` and `extension` are
    ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa. As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use. Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about. This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents. It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor

    impl = Editor(
        editor=editor, env=env, require_save=require_save, extension=extension
    )
    if filename is not None:
        # Direct file editing never returns edited contents.
        impl.edit_file(filename)
        return None
    return impl.edit(text)
def launch(url, wait=False, locate=False):
    """This function launches the given URL (or filename) in the default
    viewer application for this file type. If this is an executable, it
    might launch the executable in a new session. The return value is
    the exit code of the launched application. Usually, ``0`` indicates
    success.
    Examples::
        click.launch('https://click.palletsprojects.com/')
        click.launch('/my/downloaded/file', locate=True)
    .. versionadded:: 2.0
    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located. This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    # The platform-specific logic lives in _termui_impl.open_url.
    from ._termui_impl import open_url

    return open_url(url, wait=wait, locate=locate)
# Test hook: if this is provided, getchar() calls into this instead of
# reading the real terminal. This is used for unittesting purposes.
_getchar = None
def getchar(echo=False):
    """Fetches a single character from the terminal and returns it.

    Always returns a unicode character; in rare circumstances more than
    one character can come back (multiple characters already buffered in
    the terminal, standard input not actually a terminal, or — on
    Windows — certain non-ASCII input that looks like special-key
    markers). It always reads from the terminal, even if something is
    piped into standard input.
    .. versionadded:: 2.0
    :param echo: if set to `True`, the character read will also show up on
                 the terminal. The default is to not show it.
    """
    func = _getchar
    if func is None:
        # No test hook installed: use the real platform implementation.
        from ._termui_impl import getchar as func
    return func(echo)
def raw_terminal():
    """Delegate to the platform implementation in ``_termui_impl``."""
    from ._termui_impl import raw_terminal as f

    return f()
def pause(info="Press any key to continue ...", err=False):
    """This command stops execution and waits for the user to press any
    key to continue. This is similar to the Windows batch "pause"
    command. If the program is not run through a terminal, this command
    will instead do nothing.
    .. versionadded:: 2.0
    .. versionadded:: 4.0
       Added the `err` parameter.
    :param info: the info string to print before pausing.
    :param err: if set to `True` the message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # Pausing only makes sense when both ends are attached to a terminal.
    if not isatty(sys.stdin) or not isatty(sys.stdout):
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # Treat an interrupt like any other key press.
            pass
    finally:
        if info:
            # Terminate the line the info message started.
            echo(err=err)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/utils.py
|
import os
import sys
from ._compat import _default_text_stderr
from ._compat import _default_text_stdout
from ._compat import auto_wrap_for_ansi
from ._compat import binary_streams
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_streerror
from ._compat import is_bytes
from ._compat import open_stream
from ._compat import PY2
from ._compat import should_strip_ansi
from ._compat import string_types
from ._compat import strip_ansi
from ._compat import text_streams
from ._compat import text_type
from ._compat import WIN
from .globals import resolve_color_default
if not PY2:
from ._compat import _find_binary_writer
elif WIN:
from ._winconsole import _get_windows_argv
from ._winconsole import _hash_py_argv
from ._winconsole import _initial_argv_hash
# NOTE(review): name suggests these are the message types echo() writes
# out without converting to text first — confirm against echo() below.
echo_native_types = string_types + (bytes, bytearray)
def _posixify(name):
return "-".join(name.split()).lower()
def safecall(func):
    """Wrap *func* so that any exception it raises is swallowed.

    The wrapper returns ``func``'s result on success and ``None`` when
    ``func`` raises. ``functools.wraps`` is applied so the wrapper keeps
    the wrapped function's metadata (``__name__``, ``__doc__``, ...),
    which the original version lost.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Deliberate best-effort call: errors are intentionally ignored.
            pass

    return wrapper
def make_str(value):
    """Converts a value into a valid string."""
    if isinstance(value, bytes):
        try:
            # Prefer the filesystem encoding for raw bytes values.
            return value.decode(get_filesystem_encoding())
        except UnicodeError:
            # Fall back to UTF-8, replacing undecodable bytes.
            return value.decode("utf-8", "replace")
    return text_type(value)
def make_default_short_help(help, max_length=45):
    """Return a condensed version of help string.

    Takes words up to the first sentence-ending period or until adding
    the next word would exceed *max_length*, in which case an ellipsis
    is appended instead.
    """
    result = []
    used = 0
    stop = False
    for word in help.split():
        if word.endswith("."):
            # A period ends the short help after this word.
            stop = True
        # One extra character for the separating space (unless first word).
        needed = len(word) + (1 if result else 0)
        if used + needed > max_length:
            result.append("...")
            stop = True
        else:
            if result:
                result.append(" ")
            result.append(word)
        if stop:
            break
        used += needed
    return "".join(result)
class LazyFile(object):
    """A lazy file works like a regular file but it does not fully open
    the file but it does perform some basic checks early to see if the
    filename parameter does make sense. This is useful for safely opening
    files for writing.
    """

    def __init__(
        self, filename, mode="r", encoding=None, errors="strict", atomic=False
    ):
        self.name = filename
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        self.atomic = atomic
        if filename == "-":
            # "-" denotes a standard stream; open_stream resolves it right
            # away, so there is nothing lazy to do.
            self._f, self.should_close = open_stream(filename, mode, encoding, errors)
        else:
            if "r" in mode:
                # Open and close the file in case we're opening it for
                # reading so that we can catch at least some errors in
                # some cases early.
                open(filename, mode).close()
            self._f = None
            self.should_close = True

    def __getattr__(self, name):
        # Any unknown attribute access forces the file open and is
        # delegated to the real file object.
        return getattr(self.open(), name)

    def __repr__(self):
        if self._f is not None:
            return repr(self._f)
        return "<unopened file '{}' {}>".format(self.name, self.mode)

    def open(self):
        """Opens the file if it's not yet open. This call might fail with
        a :exc:`FileError`. Not handling this error will produce an error
        that Click shows.
        """
        if self._f is not None:
            return self._f
        try:
            rv, self.should_close = open_stream(
                self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
            )
        except (IOError, OSError) as e:  # noqa: E402
            from .exceptions import FileError

            raise FileError(self.name, hint=get_streerror(e))
        self._f = rv
        return rv

    def close(self):
        """Closes the underlying file, no matter what."""
        if self._f is not None:
            self._f.close()

    def close_intelligently(self):
        """This function only closes the file if it was opened by the lazy
        file wrapper. For instance this will never close stdin.
        """
        if self.should_close:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Only close files we opened ourselves (never a standard stream).
        self.close_intelligently()

    def __iter__(self):
        self.open()
        return iter(self._f)
class KeepOpenFile(object):
    """Transparent proxy around an open file object whose context
    manager deliberately does *not* close it.

    Standard streams get wrapped in this so that ``with`` blocks cannot
    accidentally close them.
    """

    def __init__(self, file):
        self._file = file

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Never close the wrapped file -- that is the whole point.
        pass

    def __getattr__(self, name):
        # Forward everything else to the real file object.
        return getattr(self._file, name)

    def __iter__(self):
        return iter(self._file)

    def __repr__(self):
        return repr(self._file)
def echo(message=None, file=None, nl=True, err=False, color=None):
    """Prints a message plus a newline to the given file or stdout. On
    first sight, this looks like the print function, but it has improved
    support for handling Unicode and binary data that does not fail no
    matter how badly configured the system is.

    Primarily it means that you can print binary data as well as Unicode
    data on both 2.x and 3.x to the given file in the most appropriate way
    possible. This is a very carefree function in that it will try its
    best to not fail. As of Click 6.0 this includes support for unicode
    output on the Windows console.

    In addition to that, if `colorama`_ is installed, the echo function will
    also support clever handling of ANSI codes. Essentially it will then
    do the following:

    -   add transparent handling of ANSI color codes on Windows.
    -   hide ANSI codes automatically if the destination file is not a
        terminal.

    .. _colorama: https://pypi.org/project/colorama/

    .. versionchanged:: 6.0
       As of Click 6.0 the echo function will properly support unicode
       output on the windows console. Not that click does not modify
       the interpreter in any way which means that `sys.stdout` or the
       print statement or function will still not provide unicode support.

    .. versionchanged:: 2.0
       Starting with version 2.0 of Click, the echo function will work
       with colorama if it's installed.

    .. versionadded:: 3.0
       The `err` parameter was added.

    .. versionchanged:: 4.0
       Added the `color` flag.

    :param message: the message to print
    :param file: the file to write to (defaults to ``stdout``)
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``. This is faster and easier than calling
                :func:`get_text_stderr` yourself.
    :param nl: if set to `True` (the default) a newline is printed afterwards.
    :param color: controls if the terminal supports ANSI colors or not. The
                  default is autodetection.
    """
    if file is None:
        if err:
            file = _default_text_stderr()
        else:
            file = _default_text_stdout()

    # Convert non bytes/text into the native string type.
    if message is not None and not isinstance(message, echo_native_types):
        message = text_type(message)

    if nl:
        # Append a newline of the matching type (text vs. bytes).
        message = message or u""
        if isinstance(message, text_type):
            message += u"\n"
        else:
            message += b"\n"

    # If there is a message, and we're in Python 3, and the value looks
    # like bytes, we manually need to find the binary stream and write the
    # message in there. This is done separately so that most stream
    # types will work as you would expect. Eg: you can write to StringIO
    # for other cases.
    if message and not PY2 and is_bytes(message):
        binary_file = _find_binary_writer(file)
        if binary_file is not None:
            # Flush the text layer first so output ordering is preserved.
            file.flush()
            binary_file.write(message)
            binary_file.flush()
            return

    # ANSI-style support. If there is no message or we are dealing with
    # bytes nothing is happening. If we are connected to a file we want
    # to strip colors. If we are on windows we either wrap the stream
    # to strip the color or we use the colorama support to translate the
    # ansi codes to API calls.
    if message and not is_bytes(message):
        color = resolve_color_default(color)
        if should_strip_ansi(file, color):
            message = strip_ansi(message)
        elif WIN:
            # auto_wrap_for_ansi is only available when colorama is
            # installed; otherwise strip codes unless color was forced.
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)
            elif not color:
                message = strip_ansi(message)

    if message:
        file.write(message)
    file.flush()
def get_binary_stream(name):
    """Returns a system stream for byte processing. This essentially
    returns the stream from the sys module with the given name but it
    solves some compatibility issues between different Python versions.
    Primarily this function is necessary for getting binary streams on
    Python 3.

    :param name: the name of the stream to open. Valid names are ``'stdin'``,
                 ``'stdout'`` and ``'stderr'``
    """
    opener = binary_streams.get(name)
    if opener is not None:
        return opener()
    raise TypeError("Unknown standard stream '{}'".format(name))
def get_text_stream(name, encoding=None, errors="strict"):
    """Returns a system stream for text processing. This usually returns
    a wrapped stream around a binary stream returned from
    :func:`get_binary_stream` but it also can take shortcuts on Python 3
    for already correctly configured streams.

    :param name: the name of the stream to open. Valid names are ``'stdin'``,
                 ``'stdout'`` and ``'stderr'``
    :param encoding: overrides the detected default encoding.
    :param errors: overrides the default error mode.
    """
    opener = text_streams.get(name)
    if opener is not None:
        return opener(encoding, errors)
    raise TypeError("Unknown standard stream '{}'".format(name))
def open_file(
    filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False
):
    """This is similar to how the :class:`File` works but for manual
    usage. Files are opened non lazy by default. This can open regular
    files as well as stdin/stdout if ``'-'`` is passed.

    If stdin/stdout is returned the stream is wrapped so that the context
    manager will not close the stream accidentally. This makes it possible
    to always use the function like this without having to worry to
    accidentally close a standard stream::

        with open_file(filename) as f:
            ...

    .. versionadded:: 3.0

    :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
    :param mode: the mode in which to open the file.
    :param encoding: the encoding to use.
    :param errors: the error handling for this file.
    :param lazy: can be flipped to true to open the file lazily.
    :param atomic: in atomic mode writes go into a temporary file and it's
                   moved on close.
    """
    if lazy:
        # Defer the actual open; LazyFile still performs early checks.
        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
    f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
    if not should_close:
        # Standard streams are not owned by us; shield them from closing.
        f = KeepOpenFile(f)
    return f
def get_os_args():
    """This returns the argument part of sys.argv in the most appropriate
    form for processing. What this means is that this return value is in
    a format that works for Click to process but does not necessarily
    correspond well to what's actually standard for the interpreter.

    On most environments the return value is ``sys.argv[1:]`` unchanged.
    However if you are on Windows and running Python 2 the return value
    will actually be a list of unicode strings instead because the
    default behavior on that platform otherwise will not be able to
    carry all possible values that sys.argv can have.

    .. versionadded:: 6.0
    """
    # We can only extract the unicode argv if sys.argv has not been
    # changed since the startup of the application.
    if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
        return _get_windows_argv()
    return sys.argv[1:]
def format_filename(filename, shorten=False):
    """Formats a filename for user display. The main purpose of this
    function is to ensure that the filename can be displayed at all. This
    will decode the filename to unicode if necessary in a way that it will
    not fail. Optionally, it can shorten the filename to not include the
    full path to the filename.

    :param filename: formats a filename for UI display. This will also convert
                     the filename into unicode without failing.
    :param shorten: this optionally shortens the filename to strip off the
                    path that leads up to it.
    """
    name = os.path.basename(filename) if shorten else filename
    return filename_to_ui(name)
def get_app_dir(app_name, roaming=True, force_posix=False):
    r"""Returns the config folder for the application. The default behavior
    is to return whatever is most appropriate for the operating system.

    To give you an idea, for an app called ``"Foo Bar"``, something like
    the following folders could be returned:

    Mac OS X:
      ``~/Library/Application Support/Foo Bar``
    Mac OS X (POSIX):
      ``~/.foo-bar``
    Unix:
      ``~/.config/foo-bar``
    Unix (POSIX):
      ``~/.foo-bar``
    Win XP (roaming):
      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
    Win XP (not roaming):
      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
    Win 7 (roaming):
      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
    Win 7 (not roaming):
      ``C:\Users\<user>\AppData\Local\Foo Bar``

    .. versionadded:: 2.0

    :param app_name: the application name. This should be properly capitalized
                     and can contain whitespace.
    :param roaming: controls if the folder should be roaming or not on Windows.
                    Has no effect otherwise.
    :param force_posix: if this is set to `True` then on any POSIX system the
                        folder will be stored in the home folder with a leading
                        dot instead of the XDG config home or darwin's
                        application support folder.
    """
    if WIN:
        key = "APPDATA" if roaming else "LOCALAPPDATA"
        folder = os.environ.get(key)
        if folder is None:
            # Neither APPDATA variable is set; fall back to the home dir.
            folder = os.path.expanduser("~")
        return os.path.join(folder, app_name)
    if force_posix:
        # Dotfolder directly in the home directory, e.g. ~/.foo-bar
        return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name))))
    if sys.platform == "darwin":
        return os.path.join(
            os.path.expanduser("~/Library/Application Support"), app_name
        )
    # Generic Unix: honor XDG_CONFIG_HOME, defaulting to ~/.config
    return os.path.join(
        os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
        _posixify(app_name),
    )
class PacifyFlushWrapper(object):
    """Proxy that silences broken-pipe errors raised by ``.flush()``.

    During the shutdown/final GC of the Python interpreter ``.flush()``
    is always called on ``sys.stdout`` and ``sys.stderr``; if the
    underlying pipe is already broken that flush raises.  This wrapper
    suppresses exactly that ``EPIPE`` case and forwards every other
    call and attribute untouched, so it has minimal impact on any
    other cleanup code.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def flush(self):
        import errno

        try:
            self.wrapped.flush()
        except IOError as e:
            # A broken pipe during shutdown is expected and silenced;
            # anything else is a real error and propagates.
            if e.errno != errno.EPIPE:
                raise

    def __getattr__(self, attr):
        return getattr(self.wrapped, attr)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_bashcomplete.py
|
import copy
import os
import re
from .core import Argument
from .core import MultiCommand
from .core import Option
from .parser import split_arg_string
from .types import Choice
from .utils import echo
try:
from collections import abc
except ImportError:
import collections as abc
WORDBREAK = "="
# Note, only BASH version 4.4 and later have the nosort option.
COMPLETION_SCRIPT_BASH = """
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete $1 ) )
return 0
}
%(complete_func)setup() {
local COMPLETION_OPTIONS=""
local BASH_VERSION_ARR=(${BASH_VERSION//./ })
# Only BASH version 4.4 and later have the nosort option.
if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \
&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
COMPLETION_OPTIONS="-o nosort"
fi
complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
}
%(complete_func)setup
"""
COMPLETION_SCRIPT_ZSH = """
#compdef %(script_names)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(script_names)s] )) && return 1
response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
COMP_CWORD=$((CURRENT-1)) \\
%(autocomplete_var)s=\"complete_zsh\" \\
%(script_names)s )}")
for key descr in ${(kv)response}; do
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
compstate[insert]="automenu"
}
compdef %(complete_func)s %(script_names)s
"""
COMPLETION_SCRIPT_FISH = (
"complete --no-files --command %(script_names)s --arguments"
' "(env %(autocomplete_var)s=complete_fish'
" COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)"
' %(script_names)s)"'
)
_completion_scripts = {
"bash": COMPLETION_SCRIPT_BASH,
"zsh": COMPLETION_SCRIPT_ZSH,
"fish": COMPLETION_SCRIPT_FISH,
}
_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]")
def get_completion_script(prog_name, complete_var, shell):
    """Render the shell-specific completion activation script.

    Unknown shells fall back to the bash template.  The completion
    function name is derived from *prog_name* with dashes mapped to
    underscores and all other invalid identifier characters removed.
    """
    func_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_"))
    template = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH)
    filled = template % {
        "complete_func": "_{}_completion".format(func_name),
        "script_names": prog_name,
        "autocomplete_var": complete_var,
    }
    return filled.strip() + ";"
def resolve_ctx(cli, prog_name, args):
    """Parse into a hierarchy of contexts. Contexts are connected
    through the parent variable.

    :param cli: command definition
    :param prog_name: the program that is running
    :param args: full list of args
    :return: the final context/command parsed
    """
    # resilient_parsing suppresses validation errors so partial command
    # lines (as typed during completion) still resolve to a context.
    ctx = cli.make_context(prog_name, args, resilient_parsing=True)
    args = ctx.protected_args + ctx.args
    while args:
        if isinstance(ctx.command, MultiCommand):
            if not ctx.command.chain:
                # Non-chained: descend into exactly one subcommand.
                cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
                if cmd is None:
                    return ctx
                ctx = cmd.make_context(
                    cmd_name, args, parent=ctx, resilient_parsing=True
                )
                args = ctx.protected_args + ctx.args
            else:
                # Walk chained subcommand contexts saving the last one.
                while args:
                    cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
                    if cmd is None:
                        return ctx
                    sub_ctx = cmd.make_context(
                        cmd_name,
                        args,
                        parent=ctx,
                        allow_extra_args=True,
                        allow_interspersed_args=False,
                        resilient_parsing=True,
                    )
                    args = sub_ctx.args
                # The last chained context becomes the current one.
                ctx = sub_ctx
                args = sub_ctx.protected_args + sub_ctx.args
        else:
            # Plain command: nothing further to descend into.
            break
    return ctx
def start_of_option(param_str):
    """Return whether *param_str* looks like the start of an option
    declaration (i.e. starts with ``-`` or ``--``).

    Note: for a falsy *param_str* the falsy value itself is returned,
    mirroring the short-circuit behaviour of ``and`` in the original
    expression.
    """
    if not param_str:
        return param_str
    return param_str[:1] == "-"
def is_incomplete_option(all_args, cmd_param):
    """Return whether the most recent option token still awaits a value
    for *cmd_param*.

    :param all_args: the full original list of args supplied
    :param cmd_param: the current command parameter
    :return: True when the last option declaration (a token starting
        with ``-``) is one of *cmd_param*'s option names and can still
        accept values.
    """
    if not isinstance(cmd_param, Option) or cmd_param.is_flag:
        # Arguments and flags never await a separate value.
        return False
    # Scan backwards over the last ``nargs`` tokens (ignoring the
    # WORDBREAK marker) for the most recent option-looking token.
    relevant = [arg for arg in all_args if arg != WORDBREAK]
    last_option = None
    for index, arg_str in enumerate(reversed(relevant)):
        if index + 1 > cmd_param.nargs:
            break
        if start_of_option(arg_str):
            last_option = arg_str
    return bool(last_option and last_option in cmd_param.opts)
def is_incomplete_argument(current_params, cmd_param):
    """Return whether positional *cmd_param* can still accept values.

    :param current_params: the current params and values for this
        argument as already entered
    :param cmd_param: the current command parameter
    """
    if not isinstance(cmd_param, Argument):
        return False
    values = current_params[cmd_param.name]
    if values is None or cmd_param.nargs == -1:
        # Nothing entered yet, or the argument is variadic.
        return True
    # A fixed multi-value argument is incomplete while it has fewer
    # values than its declared nargs.
    return (
        isinstance(values, abc.Iterable)
        and cmd_param.nargs > 1
        and len(values) < cmd_param.nargs
    )
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
    """
    :param ctx: context associated with the parsed command
    :param args: full list of args
    :param incomplete: the incomplete text to autocomplete
    :param cmd_param: command definition
    :return: all the possible user-specified completions for the param,
        as ``(value, description)`` tuples (description may be None)
    """
    results = []
    if isinstance(cmd_param.type, Choice):
        # Choices don't support descriptions.
        results = [
            (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete)
        ]
    elif cmd_param.autocompletion is not None:
        # The user callback may yield plain strings or (value, help)
        # tuples; normalize everything to tuples.
        dynamic_completions = cmd_param.autocompletion(
            ctx=ctx, args=args, incomplete=incomplete
        )
        results = [
            c if isinstance(c, tuple) else (c, None) for c in dynamic_completions
        ]
    return results
def get_visible_commands_starting_with(ctx, starts_with):
    """Yield visible subcommand objects whose name matches a prefix.

    :param ctx: context associated with the parsed command
    :param starts_with: string that visible commands must start with.
    :return: generator of all visible (not hidden) command objects whose
        name starts with *starts_with*.
    """
    for c in ctx.command.list_commands(ctx):
        if c.startswith(starts_with):
            command = ctx.command.get_command(ctx, c)
            if not command.hidden:
                yield command
def add_subcommand_completions(ctx, incomplete, completions_out):
    """Append ``(name, short_help)`` completion tuples for subcommands.

    Extends *completions_out* in place with the visible subcommands of
    *ctx* matching *incomplete*, plus — for chained multi-commands up
    the parent chain — commands that have not been used yet.
    """
    # Add subcommand completions.
    if isinstance(ctx.command, MultiCommand):
        completions_out.extend(
            [
                (c.name, c.get_short_help_str())
                for c in get_visible_commands_starting_with(ctx, incomplete)
            ]
        )

    # Walk up the context list and add any other completion
    # possibilities from chained commands
    while ctx.parent is not None:
        ctx = ctx.parent
        if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
            # Only offer commands not already present on the line.
            remaining_commands = [
                c
                for c in get_visible_commands_starting_with(ctx, incomplete)
                if c.name not in ctx.protected_args
            ]
            completions_out.extend(
                [(c.name, c.get_short_help_str()) for c in remaining_commands]
            )
def get_choices(cli, prog_name, args, incomplete):
    """
    :param cli: command definition
    :param prog_name: the program that is running
    :param args: full list of args
    :param incomplete: the incomplete text to autocomplete
    :return: all the possible completions for the incomplete, as
        ``(value, description)`` tuples
    """
    # Keep an unmodified copy: resolve_ctx may consume ``args``.
    all_args = copy.deepcopy(args)

    ctx = resolve_ctx(cli, prog_name, args)
    if ctx is None:
        return []

    # ``--`` ends option parsing; after it only values may follow.
    has_double_dash = "--" in all_args

    # In newer versions of bash long opts with '='s are partitioned, but
    # it's easier to parse without the '='
    if start_of_option(incomplete) and WORDBREAK in incomplete:
        partition_incomplete = incomplete.partition(WORDBREAK)
        all_args.append(partition_incomplete[0])
        incomplete = partition_incomplete[2]
    elif incomplete == WORDBREAK:
        incomplete = ""

    completions = []
    if not has_double_dash and start_of_option(incomplete):
        # completions for partial options
        for param in ctx.command.params:
            if isinstance(param, Option) and not param.hidden:
                # Offer each opt at most once, unless it may repeat.
                param_opts = [
                    param_opt
                    for param_opt in param.opts + param.secondary_opts
                    if param_opt not in all_args or param.multiple
                ]
                completions.extend(
                    [(o, param.help) for o in param_opts if o.startswith(incomplete)]
                )
        return completions
    # completion for option values from user supplied values
    for param in ctx.command.params:
        if is_incomplete_option(all_args, param):
            return get_user_autocompletions(ctx, all_args, incomplete, param)
    # completion for argument values from user supplied values
    for param in ctx.command.params:
        if is_incomplete_argument(ctx.params, param):
            return get_user_autocompletions(ctx, all_args, incomplete, param)

    add_subcommand_completions(ctx, incomplete, completions)
    # Sort before returning so that proper ordering can be enforced in custom types.
    return sorted(completions)
def do_complete(cli, prog_name, include_descriptions):
    """Handle a bash/zsh completion request driven by the environment.

    Reads ``COMP_WORDS``/``COMP_CWORD``, computes candidate completions
    and echoes one per line.  When *include_descriptions* is set (zsh),
    each candidate is followed by a description line.
    """
    cwords = split_arg_string(os.environ["COMP_WORDS"])
    cword = int(os.environ["COMP_CWORD"])
    args = cwords[1:cword]
    try:
        incomplete = cwords[cword]
    except IndexError:
        # The cursor sits on a fresh (empty) word.
        incomplete = ""

    for item in get_choices(cli, prog_name, args, incomplete):
        echo(item[0])
        if include_descriptions:
            # ZSH has trouble dealing with empty array parameters when
            # returned from commands, use '_' to indicate no description
            # is present.
            echo(item[1] if item[1] else "_")

    return True
def do_complete_fish(cli, prog_name):
    """Handle a fish completion request driven by the environment.

    Unlike bash, ``COMP_CWORD`` is used directly as the incomplete text
    (the fish completion script passes the current token, not an index).
    Candidates are echoed one per line, tab-separated from their
    description when one exists.
    """
    cwords = split_arg_string(os.environ["COMP_WORDS"])
    incomplete = os.environ["COMP_CWORD"]
    args = cwords[1:]

    for item in get_choices(cli, prog_name, args, incomplete):
        if item[1]:
            echo("{arg}\t{desc}".format(arg=item[0], desc=item[1]))
        else:
            echo(item[0])

    return True
def bashcomplete(cli, prog_name, complete_var, complete_instr):
    """Entry point for shell completion handling.

    *complete_instr* is the value of the magic environment variable:
    ``source[_<shell>]`` prints the activation script, while
    ``complete[_<shell>]`` produces completions.  Returns True when a
    completion action was performed, False otherwise.
    """
    if "_" in complete_instr:
        command, shell = complete_instr.split("_", 1)
    else:
        # A bare instruction defaults to bash behaviour.
        command = complete_instr
        shell = "bash"

    if command == "source":
        echo(get_completion_script(prog_name, complete_var, shell))
        return True
    elif command == "complete":
        if shell == "fish":
            return do_complete_fish(cli, prog_name)
        elif shell in {"bash", "zsh"}:
            # zsh additionally receives per-candidate descriptions.
            return do_complete(cli, prog_name, shell == "zsh")

    return False
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/exceptions.py
|
from ._compat import filename_to_ui
from ._compat import get_text_stderr
from ._compat import PY2
from .utils import echo
def _join_param_hints(param_hint):
if isinstance(param_hint, (tuple, list)):
return " / ".join(repr(x) for x in param_hint)
return param_hint
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""

    #: The exit code for this exception
    exit_code = 1

    def __init__(self, message):
        ctor_msg = message
        if PY2:
            # On Python 2 the base Exception wants a byte string.
            if ctor_msg is not None:
                ctor_msg = ctor_msg.encode("utf-8")
        Exception.__init__(self, ctor_msg)
        # The original (text) message is kept for display purposes.
        self.message = message

    def format_message(self):
        """Return the message as shown to the user; subclasses override."""
        return self.message

    def __str__(self):
        return self.message

    if PY2:
        # On Python 2, __str__ must return bytes; the text version is
        # exposed as __unicode__ instead.
        __unicode__ = __str__

        def __str__(self):
            return self.message.encode("utf-8")

    def show(self, file=None):
        """Print the formatted error message to *file* (stderr default)."""
        if file is None:
            file = get_text_stderr()
        echo("Error: {}".format(self.format_message()), file=file)
class UsageError(ClickException):
    """An internal exception that signals a usage error. This typically
    aborts any further handling.

    :param message: the error message to display.
    :param ctx: optionally the context that caused this error. Click will
                fill in the context automatically in some situations.
    """

    exit_code = 2

    def __init__(self, message, ctx=None):
        ClickException.__init__(self, message)
        self.ctx = ctx
        # Cache the failing command (if a context was supplied).
        self.cmd = self.ctx.command if self.ctx else None

    def show(self, file=None):
        """Print usage, an optional help hint, and the error message."""
        if file is None:
            file = get_text_stderr()
        color = None
        hint = ""
        # Suggest the command's help option when one is available.
        if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
            hint = "Try '{} {}' for help.\n".format(
                self.ctx.command_path, self.ctx.help_option_names[0]
            )
        if self.ctx is not None:
            color = self.ctx.color
            echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color)
        echo("Error: {}".format(self.format_message()), file=file, color=color)
class BadParameter(UsageError):
    """An exception that formats out a standardized error message for a
    bad parameter. This is useful when thrown from a callback or type as
    Click will attach contextual information to it (for instance, which
    parameter it is).

    .. versionadded:: 2.0

    :param param: the parameter object that caused this error. This can
                  be left out, and Click will attach this info itself
                  if possible.
    :param param_hint: a string that shows up as parameter name. This
                       can be used as alternative to `param` in cases
                       where custom validation should happen. If it is
                       a string it's used as such, if it's a list then
                       each item is quoted and separated.
    """

    def __init__(self, message, ctx=None, param=None, param_hint=None):
        UsageError.__init__(self, message, ctx)
        self.param = param
        self.param_hint = param_hint

    def format_message(self):
        # Prefer an explicit hint, then the parameter's own hint;
        # without either, name no parameter at all.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.get_error_hint(self.ctx)
        else:
            return "Invalid value: {}".format(self.message)
        param_hint = _join_param_hints(param_hint)

        return "Invalid value for {}: {}".format(param_hint, self.message)
class MissingParameter(BadParameter):
    """Raised if click required an option or argument but it was not
    provided when invoking the script.

    .. versionadded:: 4.0

    :param param_type: a string that indicates the type of the parameter.
                       The default is to inherit the parameter type from
                       the given `param`. Valid values are ``'parameter'``,
                       ``'option'`` or ``'argument'``.
    """

    def __init__(
        self, message=None, ctx=None, param=None, param_hint=None, param_type=None
    ):
        BadParameter.__init__(self, message, ctx, param, param_hint)
        self.param_type = param_type

    def format_message(self):
        # Resolve the best available hint for naming the parameter.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.get_error_hint(self.ctx)
        else:
            param_hint = None
        param_hint = _join_param_hints(param_hint)

        # Fall back to the parameter's own kind (option/argument).
        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name

        msg = self.message
        if self.param is not None:
            # Let the parameter's type contribute extra detail to the
            # message (e.g. the list of valid choices).
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += ". {}".format(msg_extra)
                else:
                    msg = msg_extra

        return "Missing {}{}{}{}".format(
            param_type,
            " {}".format(param_hint) if param_hint else "",
            ". " if msg else ".",
            msg or "",
        )

    def __str__(self):
        if self.message is None:
            # The message is optional here; fall back to the param name.
            param_name = self.param.name if self.param else None
            return "missing parameter: {}".format(param_name)
        else:
            return self.message

    if PY2:
        # Python 2 needs a bytes __str__; keep the text one as __unicode__.
        __unicode__ = __str__

        def __str__(self):
            return self.__unicode__().encode("utf-8")
class NoSuchOption(UsageError):
    """Raised if click attempted to handle an option that does not
    exist.

    .. versionadded:: 4.0
    """

    def __init__(self, option_name, message=None, possibilities=None, ctx=None):
        if message is None:
            message = "no such option: {}".format(option_name)
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
        # Optional list of similar option names used for suggestions.
        self.possibilities = possibilities

    def format_message(self):
        bits = [self.message]
        if self.possibilities:
            # One candidate reads as a suggestion, several as a list.
            if len(self.possibilities) == 1:
                bits.append("Did you mean {}?".format(self.possibilities[0]))
            else:
                possibilities = sorted(self.possibilities)
                bits.append("(Possible options: {})".format(", ".join(possibilities)))
        return " ".join(bits)
class BadOptionUsage(UsageError):
    """Raised if an option is generally supplied but the use of the option
    was incorrect. This is for instance raised if the number of arguments
    for an option is not correct.

    .. versionadded:: 4.0

    :param option_name: the name of the option being used incorrectly.
    """

    def __init__(self, option_name, message, ctx=None):
        UsageError.__init__(self, message, ctx)
        # Kept so callers can tell which option was misused.
        self.option_name = option_name
class BadArgumentUsage(UsageError):
    """Raised if an argument is generally supplied but the use of the argument
    was incorrect. This is for instance raised if the number of values
    for an argument is not correct.

    .. versionadded:: 6.0
    """

    def __init__(self, message, ctx=None):
        UsageError.__init__(self, message, ctx)
class FileError(ClickException):
    """Raised if a file cannot be opened.

    :param filename: the name of the file that could not be opened.
    :param hint: an optional human-readable reason (defaults to
        ``'unknown error'``).
    """

    def __init__(self, filename, hint=None):
        # Keep a display-safe version of the filename alongside the raw one.
        ui_filename = filename_to_ui(filename)
        if hint is None:
            hint = "unknown error"
        ClickException.__init__(self, hint)
        self.ui_filename = ui_filename
        self.filename = filename

    def format_message(self):
        return "Could not open file {}: {}".format(self.ui_filename, self.message)
class Abort(RuntimeError):
    """An internal signalling exception that signals Click to abort.

    NOTE(review): raised and handled elsewhere in the package; nothing
    in this module raises it directly.
    """
class Exit(RuntimeError):
    """An exception that indicates that the application should exit with some
    status code.

    :param code: the status code to exit with.
    """

    # Keep instances lightweight: only the exit status is stored.
    __slots__ = ("exit_code",)

    def __init__(self, code=0):
        self.exit_code = code
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_compat.py
|
# flake8: noqa
import codecs
import io
import os
import re
import sys
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
CYGWIN = sys.platform.startswith("cygwin")
MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version)
# Determine local App Engine environment, per Google's own suggestion
APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get(
"SERVER_SOFTWARE", ""
)
WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
DEFAULT_COLUMNS = 80
_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
def get_filesystem_encoding():
    """Return the filesystem encoding, falling back to the interpreter's
    default encoding when it cannot be determined."""
    encoding = sys.getfilesystemencoding()
    if encoding:
        return encoding
    return sys.getdefaultencoding()
def _make_text_stream(
    stream, encoding, errors, force_readable=False, force_writable=False
):
    # Wrap a binary stream in a text wrapper that will not close the
    # underlying stream on GC, choosing sensible defaults for encoding
    # (from the stream) and error handling ("replace").
    if encoding is None:
        encoding = get_best_encoding(stream)
    if errors is None:
        errors = "replace"
    return _NonClosingTextIOWrapper(
        stream,
        encoding,
        errors,
        line_buffering=True,
        force_readable=force_readable,
        force_writable=force_writable,
    )
def is_ascii_encoding(encoding):
    """Return True when *encoding* resolves to the ASCII codec.

    Unknown encodings are treated as not being ASCII.
    """
    try:
        codec = codecs.lookup(encoding)
    except LookupError:
        return False
    return codec.name == "ascii"
def get_best_encoding(stream):
    """Return the stream's encoding, upgrading ASCII-ish defaults to UTF-8."""
    encoding = getattr(stream, "encoding", None) or sys.getdefaultencoding()
    return "utf-8" if is_ascii_encoding(encoding) else encoding
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """A ``TextIOWrapper`` that detaches instead of closing on GC, so the
    wrapped binary stream survives — essential when wrapping the process
    standard streams."""

    def __init__(
        self,
        stream,
        encoding,
        errors,
        force_readable=False,
        force_writable=False,
        **extra
    ):
        # _FixupStream papers over streams missing parts of the io
        # interface (see its docstring).
        self._stream = stream = _FixupStream(stream, force_readable, force_writable)
        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)

    # The io module is a place where the Python 3 text behavior
    # was forced upon Python 2, so we need to unbreak
    # it to look like Python 2.
    if PY2:

        def write(self, x):
            if isinstance(x, str) or is_bytes(x):
                # Byte writes bypass the text layer and go straight to
                # the buffer, flushing pending text output first.
                try:
                    self.flush()
                except Exception:
                    pass
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

        def writelines(self, lines):
            for line in lines:
                self.write(line)

    def __del__(self):
        # Detach rather than close so the wrapped stream stays usable.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        # https://bitbucket.org/pypy/pypy/issue/1803
        return self._stream.isatty()
class _FixupStream(object):
    """The new io interface needs more from streams than streams
    traditionally implement. As such, this fix-up code is necessary in
    some circumstances.

    The forcing of readable and writable flags are there because some tools
    put badly patched objects on sys (one such offender are certain version
    of jupyter notebook).
    """

    def __init__(self, stream, force_readable=False, force_writable=False):
        self._stream = stream
        self._force_readable = force_readable
        self._force_writable = force_writable

    def __getattr__(self, name):
        # Everything not overridden below is proxied to the real stream.
        return getattr(self._stream, name)

    def read1(self, size):
        f = getattr(self._stream, "read1", None)
        if f is not None:
            return f(size)
        # We only dispatch to readline instead of read in Python 2 as we
        # do not want cause problems with the different implementation
        # of line buffering.
        if PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        if self._force_readable:
            return True
        x = getattr(self._stream, "readable", None)
        if x is not None:
            return x()
        # Fall back to probing: a zero-byte read fails on non-readable
        # streams without consuming any data.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        if self._force_writable:
            return True
        x = getattr(self._stream, "writable", None)
        if x is not None:
            return x()
        # Probe with empty writes: text first, then bytes.
        try:
            self._stream.write("")
        except Exception:
            try:
                self._stream.write(b"")
            except Exception:
                return False
        return True

    def seekable(self):
        x = getattr(self._stream, "seekable", None)
        if x is not None:
            return x()
        # Probe by seeking to the current position (a no-op when allowed).
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
if PY2:
    # Python 2 branch: map the 2.x names onto the common vocabulary used
    # throughout the rest of this module.
    text_type = unicode
    raw_input = raw_input
    string_types = (str, unicode)
    int_types = (int, long)
    iteritems = lambda x: x.iteritems()
    range_type = xrange
    def is_bytes(x):
        # buffer/bytearray are the binary types of interest on Python 2.
        return isinstance(x, (buffer, bytearray))
    # Python 2 has no str.isidentifier(); used by isidentifier() below.
    _identifier_re = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
# For Windows, we need to force stdout/stdin/stderr to binary if it's
# fetched for that. This obviously is not the most correct way to do
# it as it changes global state. Unfortunately, there does not seem to
# be a clear better way to do it as just reopening the file in binary
# mode does not change anything.
#
# An option would be to do what Python 3 does and to open the file as
# binary only, patch it back to the system, and then use a wrapper
# stream that converts newlines. It's not quite clear what's the
# correct option here.
#
# This code also lives in _winconsole for the fallback to the console
# emulation stream.
#
# There are also Windows environments where the `msvcrt` module is not
# available (which is why we use try-catch instead of the WIN variable
# here), such as the Google App Engine development server on Windows. In
# those cases there is just nothing we can do.
    # Default: no-op.  Redefined below when a platform-specific way to put
    # a file descriptor into binary / blocking mode is available.
    def set_binary_mode(f):
        return f
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        # Windows: force the underlying descriptor into binary mode so no
        # newline translation happens on the byte stream.
        def set_binary_mode(f):
            try:
                fileno = f.fileno()
            except Exception:
                pass
            else:
                msvcrt.setmode(fileno, os.O_BINARY)
            return f
    try:
        import fcntl
    except ImportError:
        pass
    else:
        # POSIX: clear O_NONBLOCK so reads/writes on the stream block like
        # regular file I/O is expected to.
        def set_binary_mode(f):
            try:
                fileno = f.fileno()
            except Exception:
                pass
            else:
                flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
                fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
            return f
    def isidentifier(x):
        # Emulate Python 3's str.isidentifier() with a regex.
        return _identifier_re.search(x) is not None
    def get_binary_stdin():
        return set_binary_mode(sys.stdin)
    def get_binary_stdout():
        _wrap_std_stream("stdout")
        return set_binary_mode(sys.stdout)
    def get_binary_stderr():
        _wrap_std_stream("stderr")
        return set_binary_mode(sys.stderr)
    def get_text_stdin(encoding=None, errors=None):
        # Prefer the dedicated Windows console stream when one is attached.
        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdin, encoding, errors, force_readable=True)
    def get_text_stdout(encoding=None, errors=None):
        _wrap_std_stream("stdout")
        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdout, encoding, errors, force_writable=True)
    def get_text_stderr(encoding=None, errors=None):
        _wrap_std_stream("stderr")
        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stderr, encoding, errors, force_writable=True)
    def filename_to_ui(value):
        # Decode filesystem bytes for display; undecodable bytes become
        # U+FFFD instead of raising.
        if isinstance(value, bytes):
            value = value.decode(get_filesystem_encoding(), "replace")
        return value
else:
    import io
    # Python 3 branch: aliases for the shared vocabulary of this module.
    text_type = str
    raw_input = input
    string_types = (str,)
    int_types = (int,)
    range_type = range
    isidentifier = lambda x: x.isidentifier()
    iteritems = lambda x: iter(x.items())
    def is_bytes(x):
        # memoryview joins the binary family on Python 3.
        return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case, we assume the default.
def _is_binary_writer(stream, default=False):
try:
stream.write(b"")
except Exception:
try:
stream.write("")
return False
except Exception:
pass
return default
return True
    def _find_binary_reader(stream):
        # We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
        # the streams to get binary streams. Some code might do this, so
        # we need to deal with this case explicitly.
        if _is_binary_reader(stream, False):
            return stream
        buf = getattr(stream, "buffer", None)
        # Same situation here; this time we assume that the buffer is
        # actually binary in case it's closed.
        if buf is not None and _is_binary_reader(buf, True):
            return buf
        # Implicitly returns None when no binary layer could be found.
    def _find_binary_writer(stream):
        # We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
        # the streams to get binary streams. Some code might do this, so
        # we need to deal with this case explicitly.
        if _is_binary_writer(stream, False):
            return stream
        buf = getattr(stream, "buffer", None)
        # Same situation here; this time we assume that the buffer is
        # actually binary in case it's closed.
        if buf is not None and _is_binary_writer(buf, True):
            return buf
        # Implicitly returns None when no binary layer could be found.
    def _stream_is_misconfigured(stream):
        """A stream is misconfigured if its encoding is ASCII."""
        # If the stream does not have an encoding set, we assume it's set
        # to ASCII. This appears to happen in certain unittest
        # environments. It's not quite clear what the correct behavior is
        # but this at least will force Click to recover somehow.
        # A stream with no ``encoding`` attribute is treated the same way.
        return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
def _is_compat_stream_attr(stream, attr, value):
"""A stream attribute is compatible if it is equal to the
desired value or the desired value is unset and the attribute
has a value.
"""
stream_value = getattr(stream, attr, None)
return stream_value == value or (value is None and stream_value is not None)
    def _is_compatible_text_stream(stream, encoding, errors):
        """Check if a stream's encoding and errors attributes are
        compatible with the desired values.
        """
        # Both attributes must individually pass _is_compat_stream_attr.
        return _is_compat_stream_attr(
            stream, "encoding", encoding
        ) and _is_compat_stream_attr(stream, "errors", errors)
    def _force_correct_text_stream(
        text_stream,
        encoding,
        errors,
        is_binary,
        find_binary,
        force_readable=False,
        force_writable=False,
    ):
        # Return a text stream over *text_stream* that is guaranteed to use
        # the requested encoding/errors, rewrapping its binary layer when
        # the existing configuration is unsuitable.  *is_binary* and
        # *find_binary* select reader- or writer-flavored probing.
        if is_binary(text_stream, False):
            binary_reader = text_stream
        else:
            # If the stream looks compatible, and won't default to a
            # misconfigured ascii encoding, return it as-is.
            if _is_compatible_text_stream(text_stream, encoding, errors) and not (
                encoding is None and _stream_is_misconfigured(text_stream)
            ):
                return text_stream
            # Otherwise, get the underlying binary reader.
            binary_reader = find_binary(text_stream)
            # If that's not possible, silently use the original reader
            # and get mojibake instead of exceptions.
            if binary_reader is None:
                return text_stream
        # Default errors to replace instead of strict in order to get
        # something that works.
        if errors is None:
            errors = "replace"
        # Wrap the binary stream in a text stream with the correct
        # encoding parameters.
        return _make_text_stream(
            binary_reader,
            encoding,
            errors,
            force_readable=force_readable,
            force_writable=force_writable,
        )
    def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False):
        # Reader-flavored convenience wrapper for _force_correct_text_stream.
        return _force_correct_text_stream(
            text_reader,
            encoding,
            errors,
            _is_binary_reader,
            _find_binary_reader,
            force_readable=force_readable,
        )
    def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False):
        # Writer-flavored convenience wrapper for _force_correct_text_stream.
        return _force_correct_text_stream(
            text_writer,
            encoding,
            errors,
            _is_binary_writer,
            _find_binary_writer,
            force_writable=force_writable,
        )
    def get_binary_stdin():
        # Binary layer of sys.stdin; fail loudly rather than hand back text.
        reader = _find_binary_reader(sys.stdin)
        if reader is None:
            raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
        return reader
    def get_binary_stdout():
        # Binary layer of sys.stdout.
        writer = _find_binary_writer(sys.stdout)
        if writer is None:
            raise RuntimeError(
                "Was not able to determine binary stream for sys.stdout."
            )
        return writer
    def get_binary_stderr():
        # Binary layer of sys.stderr.
        writer = _find_binary_writer(sys.stderr)
        if writer is None:
            raise RuntimeError(
                "Was not able to determine binary stream for sys.stderr."
            )
        return writer
    def get_text_stdin(encoding=None, errors=None):
        # The Windows console gets a dedicated stream implementation;
        # otherwise rewrap stdin with the requested text configuration.
        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_reader(
            sys.stdin, encoding, errors, force_readable=True
        )
    def get_text_stdout(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_writer(
            sys.stdout, encoding, errors, force_writable=True
        )
    def get_text_stderr(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
        if rv is not None:
            return rv
        return _force_correct_text_writer(
            sys.stderr, encoding, errors, force_writable=True
        )
    def filename_to_ui(value):
        # Normalize a filename (bytes or str) for display: round-trip str
        # through surrogateescape so undecodable filesystem bytes become
        # U+FFFD instead of raising on output.
        if isinstance(value, bytes):
            value = value.decode(get_filesystem_encoding(), "replace")
        else:
            value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace")
        return value
def get_streerror(e, default=None):
    """Return a text error message for the exception *e*.

    Prefers the exception's ``strerror`` attribute (OS errors); otherwise
    falls back to *default* when given, or ``str(e)``.  Byte messages are
    decoded as UTF-8 with replacement.
    """
    if hasattr(e, "strerror"):
        msg = e.strerror
    elif default is not None:
        msg = default
    else:
        msg = str(e)
    if isinstance(msg, bytes):
        msg = msg.decode("utf-8", "replace")
    return msg
def _wrap_io_open(file, mode, encoding, errors):
    """On Python 2, :func:`io.open` returns a text file wrapper that
    requires passing ``unicode`` to ``write``. Need to open the file in
    binary mode then wrap it in a subclass that can write ``str`` and
    ``unicode``.
    Also handles not passing ``encoding`` and ``errors`` in binary mode.
    """
    binary = "b" in mode
    if binary:
        kwargs = {}
    else:
        kwargs = {"encoding": encoding, "errors": errors}
    if not PY2 or binary:
        return io.open(file, mode, **kwargs)
    # Python 2 text mode: open binary (dropping any "t" flag), then wrap
    # with the str/unicode tolerant text stream.
    f = io.open(file, "{}b".format(mode.replace("t", "")))
    return _make_text_stream(f, **kwargs)
def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False):
    # Open *filename* ("-" selects the standard streams) and return a
    # ``(stream, should_close)`` pair.
    binary = "b" in mode
    # Standard streams first. These are simple because they don't need
    # special handling for the atomic flag. It's entirely ignored.
    if filename == "-":
        if any(m in mode for m in ["w", "a", "x"]):
            if binary:
                return get_binary_stdout(), False
            return get_text_stdout(encoding=encoding, errors=errors), False
        if binary:
            return get_binary_stdin(), False
        return get_text_stdin(encoding=encoding, errors=errors), False
    # Non-atomic writes directly go out through the regular open functions.
    if not atomic:
        return _wrap_io_open(filename, mode, encoding, errors), True
    # Some usability stuff for atomic writes
    if "a" in mode:
        raise ValueError(
            "Appending to an existing file is not supported, because that"
            " would involve an expensive `copy`-operation to a temporary"
            " file. Open the file in normal `w`-mode and copy explicitly"
            " if that's what you're after."
        )
    if "x" in mode:
        raise ValueError("Use the `overwrite`-parameter instead.")
    if "w" not in mode:
        raise ValueError("Atomic writes only make sense with `w`-mode.")
    # Atomic writes are more complicated. They work by opening a file
    # as a proxy in the same folder and then using the fdopen
    # functionality to wrap it in a Python file. Then we wrap it in an
    # atomic file that moves the file over on close.
    import errno
    import random
    try:
        perm = os.stat(filename).st_mode
    except OSError:
        perm = None
    flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
    if binary:
        flags |= getattr(os, "O_BINARY", 0)
    while True:
        # Retry with fresh random names until the exclusive create wins.
        tmp_filename = os.path.join(
            os.path.dirname(filename),
            ".__atomic-write{:08x}".format(random.randrange(1 << 32)),
        )
        try:
            fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
            break
        except OSError as e:
            if e.errno == errno.EEXIST or (
                os.name == "nt"
                and e.errno == errno.EACCES
                and os.path.isdir(e.filename)
                and os.access(e.filename, os.W_OK)
            ):
                continue
            raise
    if perm is not None:
        os.chmod(tmp_filename, perm)  # in case perm includes bits in umask
    f = _wrap_io_open(fd, mode, encoding, errors)
    return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, "replace"):
    _replace = os.replace
    _can_replace = True
else:
    # os.rename cannot overwrite an existing target on Windows, so the
    # target must be removed first there (_can_replace is False).
    _replace = os.rename
    _can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
# Fallback values; replaced below when colorama is available on Windows.
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
    # Remove ANSI escape sequences (colors, cursor movement) from text.
    return _ansi_re.sub("", value)
def _is_jupyter_kernel_output(stream):
    # Detect streams owned by an ipykernel (jupyter) session, which render
    # ANSI codes themselves even though they are not ttys.
    if WIN:
        # TODO: Couldn't test on Windows, shouldn't try to support until
        # someone tests the details wrt colorama.
        return
    # Unwrap click's own stream wrappers to inspect the real stream class.
    while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
        stream = stream._stream
    return stream.__class__.__module__.startswith("ipykernel.")
def should_strip_ansi(stream=None, color=None):
    # With no explicit color choice, strip ANSI codes unless the stream is
    # an interactive terminal or a jupyter kernel stream.
    if color is None:
        if stream is None:
            stream = sys.stdin
        return not isatty(stream) and not _is_jupyter_kernel_output(stream)
    return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
    # Windows has a smaller terminal
    DEFAULT_COLUMNS = 79
    from ._winconsole import _get_windows_console_stream, _wrap_std_stream
    def _get_argv_encoding():
        # argv arrives in the locale's preferred encoding on Windows.
        import locale
        return locale.getpreferredencoding()
    if PY2:
        def raw_input(prompt=""):
            # Re-implemented so the prompt goes through click's wrapped
            # (console-aware) stdout instead of the raw C stdout.
            sys.stderr.flush()
            if prompt:
                stdout = _default_text_stdout()
                stdout.write(prompt)
            stdin = _default_text_stdin()
            return stdin.readline().rstrip("\r\n")
    try:
        import colorama
    except ImportError:
        pass
    else:
        _ansi_stream_wrappers = WeakKeyDictionary()
        def auto_wrap_for_ansi(stream, color=None):
            """This function wraps a stream so that calls through colorama
            are issued to the win32 console API to recolor on demand. It
            also ensures to reset the colors if a write call is interrupted
            to not destroy the console afterwards.
            """
            try:
                cached = _ansi_stream_wrappers.get(stream)
            except Exception:
                cached = None
            if cached is not None:
                return cached
            strip = should_strip_ansi(stream, color)
            ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            rv = ansi_wrapper.stream
            _write = rv.write
            def _safe_write(s):
                try:
                    return _write(s)
                except:
                    # Reset the console colors before propagating so an
                    # interrupted write does not leave the console recolored.
                    ansi_wrapper.reset_all()
                    raise
            rv.write = _safe_write
            try:
                _ansi_stream_wrappers[stream] = rv
            except Exception:
                pass
            return rv
        def get_winterm_size():
            # Window size from the win32 console screen buffer info.
            win = colorama.win32.GetConsoleScreenBufferInfo(
                colorama.win32.STDOUT
            ).srWindow
            return win.Right - win.Left, win.Bottom - win.Top
else:
    def _get_argv_encoding():
        return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
    # No Windows console handling needed outside Windows.
    _get_windows_console_stream = lambda *x: None
    _wrap_std_stream = lambda *x: None
def term_len(x):
    # Visible length of *x*: ANSI escape sequences occupy no columns.
    return len(strip_ansi(x))
def isatty(stream):
    # Safe isatty(): any failure (closed or fake stream) counts as "no tty".
    try:
        return stream.isatty()
    except Exception:
        return False
def _make_cached_stream_func(src_func, wrapper_func):
    # Build a memoized accessor: the stream returned by *src_func* is
    # wrapped once by *wrapper_func*; the cache is keyed weakly on the
    # stream object so replacing e.g. sys.stdout invalidates it naturally.
    cache = WeakKeyDictionary()
    def func():
        stream = src_func()
        try:
            rv = cache.get(stream)
        except Exception:
            # Unhashable/unweakref-able streams simply skip the cache.
            rv = None
        if rv is not None:
            return rv
        rv = wrapper_func()
        try:
            stream = src_func()  # In case wrapper_func() modified the stream
            cache[stream] = rv
        except Exception:
            pass
        return rv
    return func
_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
# Public lookup tables: stream name -> zero-arg factory for that stream.
binary_streams = {
    "stdin": get_binary_stdin,
    "stdout": get_binary_stdout,
    "stderr": get_binary_stderr,
}
text_streams = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/_termui_impl.py
|
# -*- coding: utf-8 -*-
"""
This module contains implementations for the termui module. To keep the
import time of Click down, some infrequently used functionality is
placed in this module and only imported as needed.
"""
import contextlib
import math
import os
import sys
import time
from ._compat import _default_text_stdout
from ._compat import CYGWIN
from ._compat import get_best_encoding
from ._compat import int_types
from ._compat import isatty
from ._compat import open_stream
from ._compat import range_type
from ._compat import strip_ansi
from ._compat import term_len
from ._compat import WIN
from .exceptions import ClickException
from .utils import echo
if os.name == "nt":
    BEFORE_BAR = "\r"
    AFTER_BAR = "\n"
else:
    # POSIX terminals also hide (\033[?25l) / show (\033[?25h) the cursor
    # around progress bar rendering.
    BEFORE_BAR = "\r\033[?25l"
    AFTER_BAR = "\033[?25h\n"
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if hint is NotImplemented or not isinstance(hint, int_types) or hint < 0:
return None
return hint
class ProgressBar(object):
    """Internal implementation of click's progress bar.

    Iterates over *iterable* while rendering a textual progress bar to
    *file*, tracking position, rolling ETA and rendering width.  Intended
    to be used as a context manager (see :func:`click.progressbar`).
    """
    def __init__(
        self,
        iterable,
        length=None,
        fill_char="#",
        empty_char=" ",
        bar_template="%(bar)s",
        info_sep="  ",
        show_eta=True,
        show_percent=None,
        show_pos=False,
        item_show_func=None,
        label=None,
        file=None,
        color=None,
        width=30,
    ):
        self.fill_char = fill_char
        self.empty_char = empty_char
        self.bar_template = bar_template
        self.info_sep = info_sep
        self.show_eta = show_eta
        self.show_percent = show_percent
        self.show_pos = show_pos
        self.item_show_func = item_show_func
        self.label = label or ""
        if file is None:
            file = _default_text_stdout()
        self.file = file
        self.color = color
        self.width = width
        # width == 0 means "size to the terminal" (recomputed each render).
        self.autowidth = width == 0
        if length is None:
            length = _length_hint(iterable)
        if iterable is None:
            if length is None:
                raise TypeError("iterable or length is required")
            iterable = range_type(length)
        self.iter = iter(iterable)
        self.length = length
        self.length_known = length is not None
        self.pos = 0
        # Rolling window of per-item durations used for the ETA.
        self.avg = []
        self.start = self.last_eta = time.time()
        self.eta_known = False
        self.finished = False
        self.max_width = None
        self.entered = False
        self.current_item = None
        # No rendering at all when the target is not a terminal.
        self.is_hidden = not isatty(self.file)
        self._last_line = None
        # Runs shorter than this (seconds) skip rendering entirely.
        self.short_limit = 0.5
    def __enter__(self):
        self.entered = True
        self.render_progress()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.render_finish()
    def __iter__(self):
        if not self.entered:
            raise RuntimeError("You need to use progress bars in a with block.")
        self.render_progress()
        return self.generator()
    def __next__(self):
        # Iteration is defined in terms of a generator function,
        # returned by iter(self); use that to define next(). This works
        # because `self.iter` is an iterable consumed by that generator,
        # so it is re-entry safe. Calling `next(self.generator())`
        # twice works and does "what you want".
        return next(iter(self))
    # Python 2 compat
    next = __next__
    def is_fast(self):
        # True while the bar has been running for less than short_limit.
        return time.time() - self.start <= self.short_limit
    def render_finish(self):
        if self.is_hidden or self.is_fast():
            return
        self.file.write(AFTER_BAR)
        self.file.flush()
    @property
    def pct(self):
        # Completed fraction in [0.0, 1.0].
        if self.finished:
            return 1.0
        return min(self.pos / (float(self.length) or 1), 1.0)
    @property
    def time_per_iteration(self):
        if not self.avg:
            return 0.0
        return sum(self.avg) / float(len(self.avg))
    @property
    def eta(self):
        # Estimated remaining seconds, 0.0 when unknown or finished.
        if self.length_known and not self.finished:
            return self.time_per_iteration * (self.length - self.pos)
        return 0.0
    def format_eta(self):
        # Render the ETA as [Dd ]HH:MM:SS, or "" when unknown.
        if self.eta_known:
            t = int(self.eta)
            seconds = t % 60
            t //= 60
            minutes = t % 60
            t //= 60
            hours = t % 24
            t //= 24
            if t > 0:
                return "{}d {:02}:{:02}:{:02}".format(t, hours, minutes, seconds)
            else:
                return "{:02}:{:02}:{:02}".format(hours, minutes, seconds)
        return ""
    def format_pos(self):
        pos = str(self.pos)
        if self.length_known:
            pos += "/{}".format(self.length)
        return pos
    def format_pct(self):
        # Right-aligned percentage, e.g. "  7%".
        return "{: 4}%".format(int(self.pct * 100))[1:]
    def format_bar(self):
        if self.length_known:
            bar_length = int(self.pct * self.width)
            bar = self.fill_char * bar_length
            bar += self.empty_char * (self.width - bar_length)
        elif self.finished:
            bar = self.fill_char * self.width
        else:
            # Unknown length: render a single oscillating fill character.
            bar = list(self.empty_char * (self.width or 1))
            if self.time_per_iteration != 0:
                bar[
                    int(
                        (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
                        * self.width
                    )
                ] = self.fill_char
            bar = "".join(bar)
        return bar
    def format_progress_line(self):
        show_percent = self.show_percent
        info_bits = []
        if self.length_known and show_percent is None:
            show_percent = not self.show_pos
        if self.show_pos:
            info_bits.append(self.format_pos())
        if show_percent:
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)
        return (
            self.bar_template
            % {
                "label": self.label,
                "bar": self.format_bar(),
                "info": self.info_sep.join(info_bits),
            }
        ).rstrip()
    def render_progress(self):
        from .termui import get_terminal_size
        if self.is_hidden:
            return
        buf = []
        # Update width in case the terminal has been resized
        if self.autowidth:
            old_width = self.width
            self.width = 0
            clutter_length = term_len(self.format_progress_line())
            new_width = max(0, get_terminal_size()[0] - clutter_length)
            if new_width < old_width:
                # Terminal shrank: blank the old, wider line first.
                buf.append(BEFORE_BAR)
                buf.append(" " * self.max_width)
                self.max_width = new_width
            self.width = new_width
        clear_width = self.width
        if self.max_width is not None:
            clear_width = self.max_width
        buf.append(BEFORE_BAR)
        line = self.format_progress_line()
        line_len = term_len(line)
        if self.max_width is None or self.max_width < line_len:
            self.max_width = line_len
        buf.append(line)
        # Pad with spaces so shorter lines fully overwrite longer ones.
        buf.append(" " * (clear_width - line_len))
        line = "".join(buf)
        # Render the line only if it changed.
        if line != self._last_line and not self.is_fast():
            self._last_line = line
            echo(line, file=self.file, color=self.color, nl=False)
            self.file.flush()
    def make_step(self, n_steps):
        self.pos += n_steps
        if self.length_known and self.pos >= self.length:
            self.finished = True
        # Recompute the ETA window at most once per second.
        if (time.time() - self.last_eta) < 1.0:
            return
        self.last_eta = time.time()
        # self.avg is a rolling list of length <= 7 of steps where steps are
        # defined as time elapsed divided by the total progress through
        # self.length.
        if self.pos:
            step = (time.time() - self.start) / self.pos
        else:
            step = time.time() - self.start
        self.avg = self.avg[-6:] + [step]
        self.eta_known = self.length_known
    def update(self, n_steps):
        self.make_step(n_steps)
        self.render_progress()
    def finish(self):
        self.eta_known = 0
        self.current_item = None
        self.finished = True
    def generator(self):
        """Return a generator which yields the items added to the bar
        during construction, and updates the progress bar *after* the
        yielded block returns.
        """
        # WARNING: the iterator interface for `ProgressBar` relies on
        # this and only works because this is a simple generator which
        # doesn't create or manage additional state. If this function
        # changes, the impact should be evaluated both against
        # `iter(bar)` and `next(bar)`. `next()` in particular may call
        # `self.generator()` repeatedly, and this must remain safe in
        # order for that interface to work.
        if not self.entered:
            raise RuntimeError("You need to use progress bars in a with block.")
        if self.is_hidden:
            for rv in self.iter:
                yield rv
        else:
            for rv in self.iter:
                self.current_item = rv
                yield rv
                self.update(1)
            self.finish()
            self.render_progress()
def pager(generator, color=None):
    """Decide what method to use for paging through text."""
    stdout = _default_text_stdout()
    # Not an interactive session on both ends: just dump the text.
    if not isatty(sys.stdin) or not isatty(stdout):
        return _nullpager(stdout, generator, color)
    pager_cmd = (os.environ.get("PAGER", None) or "").strip()
    if pager_cmd:
        if WIN:
            return _tempfilepager(generator, pager_cmd, color)
        return _pipepager(generator, pager_cmd, color)
    if os.environ.get("TERM") in ("dumb", "emacs"):
        return _nullpager(stdout, generator, color)
    if WIN or sys.platform.startswith("os2"):
        return _tempfilepager(generator, "more <", color)
    if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
        return _pipepager(generator, "less", color)
    import tempfile
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        # Probe whether `more` exists by running it on the empty temp file.
        if hasattr(os, "system") and os.system('more "{}"'.format(filename)) == 0:
            return _pipepager(generator, "more", color)
        return _nullpager(stdout, generator, color)
    finally:
        os.unlink(filename)
def _pipepager(generator, cmd, color):
    """Page through text by feeding it to another program. Invoking a
    pager through this might support colors.
    """
    import subprocess
    env = dict(os.environ)
    # If we're piping to less we might support colors under the
    # condition that the -R/-r flag is in effect (or LESS is unset, in
    # which case we inject -R ourselves).
    cmd_detail = cmd.rsplit("/", 1)[-1].split()
    if color is None and cmd_detail[0] == "less":
        less_flags = "{}{}".format(os.environ.get("LESS", ""), " ".join(cmd_detail[1:]))
        if not less_flags:
            env["LESS"] = "-R"
            color = True
        elif "r" in less_flags or "R" in less_flags:
            color = True
    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
    encoding = get_best_encoding(c.stdin)
    try:
        for text in generator:
            if not color:
                text = strip_ansi(text)
            c.stdin.write(text.encode(encoding, "replace"))
    except (IOError, KeyboardInterrupt):
        pass
    else:
        c.stdin.close()
    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
    # search or other commands inside less).
    #
    # That means when the user hits ^C, the parent process (click) terminates,
    # but less is still alive, paging the output and messing up the terminal.
    #
    # If the user wants to make the pager exit on ^C, they should set
    # `LESS='-K'`. It's not our decision to make.
    while True:
        try:
            c.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
def _tempfilepager(generator, cmd, color):
    """Page through text by invoking a program on a temporary file.

    :param generator: iterable of text chunks to page through.
    :param cmd: the pager command line to run on the file.
    :param color: when falsy, ANSI codes are stripped from the text.
    """
    import tempfile

    # BUG FIX: tempfile.mktemp() only returns a name, leaving a window in
    # which another process can create the file (symlink attack / race).
    # mkstemp() atomically creates the file with restrictive permissions.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    # TODO: This never terminates if the passed generator never terminates.
    text = "".join(generator)
    if not color:
        text = strip_ansi(text)
    encoding = get_best_encoding(sys.stdout)
    with open_stream(filename, "wb")[0] as f:
        f.write(text.encode(encoding))
    try:
        os.system('{} "{}"'.format(cmd, filename))
    finally:
        os.unlink(filename)
def _nullpager(stream, generator, color):
"""Simply print unformatted text. This is the ultimate fallback."""
for text in generator:
if not color:
text = strip_ansi(text)
stream.write(text)
class Editor(object):
    """Launch the user's text editor, either on an existing file or on a
    chunk of text routed through a temporary file."""
    def __init__(self, editor=None, env=None, require_save=True, extension=".txt"):
        self.editor = editor
        self.env = env
        self.require_save = require_save
        self.extension = extension
    def get_editor(self):
        # Resolution order: explicit editor, $VISUAL, $EDITOR, then
        # platform fallbacks.
        if self.editor is not None:
            return self.editor
        for key in "VISUAL", "EDITOR":
            rv = os.environ.get(key)
            if rv:
                return rv
        if WIN:
            return "notepad"
        for editor in "sensible-editor", "vim", "nano":
            if os.system("which {} >/dev/null 2>&1".format(editor)) == 0:
                return editor
        return "vi"
    def edit_file(self, filename):
        # Run the editor on *filename*, raising ClickException on failure.
        import subprocess
        editor = self.get_editor()
        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            c = subprocess.Popen(
                '{} "{}"'.format(editor, filename), env=environ, shell=True,
            )
            exit_code = c.wait()
            if exit_code != 0:
                raise ClickException("{}: Editing failed!".format(editor))
        except OSError as e:
            raise ClickException("{}: Editing failed: {}".format(editor, e))
    def edit(self, text):
        # Edit *text* in a temporary file; return the edited text, or None
        # when require_save is set and the user did not save.
        import tempfile
        text = text or ""
        if text and not text.endswith("\n"):
            text += "\n"
        fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
        try:
            if WIN:
                # Notepad needs a BOM and CRLF line endings.
                encoding = "utf-8-sig"
                text = text.replace("\n", "\r\n")
            else:
                encoding = "utf-8"
            text = text.encode(encoding)
            f = os.fdopen(fd, "wb")
            f.write(text)
            f.close()
            timestamp = os.path.getmtime(name)
            self.edit_file(name)
            # An unchanged mtime means the user did not save.
            if self.require_save and os.path.getmtime(name) == timestamp:
                return None
            f = open(name, "rb")
            try:
                rv = f.read()
            finally:
                f.close()
            return rv.decode("utf-8-sig").replace("\r\n", "\n")
        finally:
            os.unlink(name)
def open_url(url, wait=False, locate=False):
    """Open *url* with the platform's default handler.

    :param url: the URL or ``file://`` path to open.
    :param wait: wait for the spawned program to exit before returning.
    :param locate: reveal the file's containing location in the file
                   manager instead of launching the URL.
    :return: the launcher's exit code where available, otherwise 0/1.
    """
    import subprocess

    def _unquote_file(url):
        # BUG FIX: the original imported ``urllib`` and called
        # ``urllib.unquote``, which only exists on Python 2; Python 3
        # moved it to ``urllib.parse.unquote``.  Import whichever exists.
        try:
            from urllib.parse import unquote
        except ImportError:
            from urllib import unquote
        if url.startswith("file://"):
            url = unquote(url[7:])
        return url

    if sys.platform == "darwin":
        args = ["open"]
        if wait:
            args.append("-W")
        if locate:
            args.append("-R")
        args.append(_unquote_file(url))
        null = open("/dev/null", "w")
        try:
            return subprocess.Popen(args, stderr=null).wait()
        finally:
            null.close()
    elif WIN:
        if locate:
            url = _unquote_file(url)
            args = 'explorer /select,"{}"'.format(_unquote_file(url.replace('"', "")))
        else:
            args = 'start {} "" "{}"'.format(
                "/WAIT" if wait else "", url.replace('"', "")
            )
        return os.system(args)
    elif CYGWIN:
        if locate:
            url = _unquote_file(url)
            args = 'cygstart "{}"'.format(os.path.dirname(url).replace('"', ""))
        else:
            args = 'cygstart {} "{}"'.format("-w" if wait else "", url.replace('"', ""))
        return os.system(args)
    # Generic POSIX: try xdg-open, fall back to webbrowser for http(s).
    try:
        if locate:
            url = os.path.dirname(_unquote_file(url)) or "."
        else:
            url = _unquote_file(url)
        c = subprocess.Popen(["xdg-open", url])
        if wait:
            return c.wait()
        return 0
    except OSError:
        if url.startswith(("http://", "https://")) and not locate and not wait:
            import webbrowser

            webbrowser.open(url)
            return 0
        return 1
def _translate_ch_to_exc(ch):
if ch == u"\x03":
raise KeyboardInterrupt()
if ch == u"\x04" and not WIN: # Unix-like, Ctrl+D
raise EOFError()
if ch == u"\x1a" and WIN: # Windows, Ctrl+Z
raise EOFError()
if WIN:
    import msvcrt
    @contextlib.contextmanager
    def raw_terminal():
        # Windows consoles need no termios setup; nothing to do.
        yield
    def getchar(echo):
        # The function `getch` will return a bytes object corresponding to
        # the pressed character. Since Windows 10 build 1803, it will also
        # return \x00 when called a second time after pressing a regular key.
        #
        # `getwch` does not share this probably-bugged behavior. Moreover, it
        # returns a Unicode object by default, which is what we want.
        #
        # Either of these functions will return \x00 or \xe0 to indicate
        # a special key, and you need to call the same function again to get
        # the "rest" of the code. The fun part is that \u00e0 is
        # "latin small letter a with grave", so if you type that on a French
        # keyboard, you _also_ get a \xe0.
        # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
        # resulting Unicode string reads as "a with grave" + "capital H".
        # This is indistinguishable from when the user actually types
        # "a with grave" and then "capital H".
        #
        # When \xe0 is returned, we assume it's part of a special-key sequence
        # and call `getwch` again, but that means that when the user types
        # the \u00e0 character, `getchar` doesn't return until a second
        # character is typed.
        # The alternative is returning immediately, but that would mess up
        # cross-platform handling of arrow keys and others that start with
        # \xe0. Another option is using `getch`, but then we can't reliably
        # read non-ASCII characters, because return values of `getch` are
        # limited to the current 8-bit codepage.
        #
        # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
        # is doing the right thing in more situations than with `getch`.
        if echo:
            func = msvcrt.getwche
        else:
            func = msvcrt.getwch
        rv = func()
        if rv in (u"\x00", u"\xe0"):
            # \x00 and \xe0 are control characters that indicate special key,
            # see above.
            rv += func()
        _translate_ch_to_exc(rv)
        return rv
else:
    import tty
    import termios
    @contextlib.contextmanager
    def raw_terminal():
        # Put the tty into raw mode for the duration of the block and
        # restore the previous settings afterwards; falls back to
        # /dev/tty when stdin is not a terminal.
        if not isatty(sys.stdin):
            f = open("/dev/tty")
            fd = f.fileno()
        else:
            fd = sys.stdin.fileno()
            f = None
        try:
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                yield fd
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
                sys.stdout.flush()
                if f is not None:
                    f.close()
        except termios.error:
            pass
    def getchar(echo):
        # Read up to 32 bytes in raw mode so multi-byte characters and
        # escape sequences arrive in a single call.
        with raw_terminal() as fd:
            ch = os.read(fd, 32)
            ch = ch.decode(get_best_encoding(sys.stdin), "replace")
            if echo and isatty(sys.stdout):
                sys.stdout.write(ch)
        _translate_ch_to_exc(ch)
        return ch
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/testing.py
|
import contextlib
import os
import shlex
import shutil
import sys
import tempfile
from . import formatting
from . import termui
from . import utils
from ._compat import iteritems
from ._compat import PY2
from ._compat import string_types
if PY2:
from cStringIO import StringIO
else:
import io
from ._compat import _find_binary_reader
class EchoingStdin(object):
    """Wrap an input stream so that everything read from it is mirrored
    into *output*, emulating a terminal session transcript."""

    def __init__(self, input, output):
        self._input = input
        self._output = output

    def __getattr__(self, x):
        # Delegate everything not overridden to the real input stream.
        return getattr(self._input, x)

    def _echo(self, rv):
        # Mirror the data that was just read into the output stream.
        self._output.write(rv)
        return rv

    def read(self, n=-1):
        return self._echo(self._input.read(n))

    def readline(self, n=-1):
        return self._echo(self._input.readline(n))

    def readlines(self):
        return [self._echo(line) for line in self._input.readlines()]

    def __iter__(self):
        return iter(self._echo(line) for line in self._input)

    def __repr__(self):
        return repr(self._input)
def make_input_stream(input, charset):
    """Coerce *input* (``None``, text, bytes, or a stream object) into a
    binary input stream suitable for use as ``sys.stdin``.
    """
    # Stream-like objects are handed back as a binary reader.
    if hasattr(input, "read"):
        if PY2:
            return input
        rv = _find_binary_reader(input)
        if rv is None:
            raise TypeError("Could not find binary reader for input stream.")
        return rv
    # Otherwise normalize the payload to bytes first.
    if input is None:
        data = b""
    elif isinstance(input, bytes):
        data = input
    else:
        data = input.encode(charset)
    if PY2:
        return StringIO(data)
    return io.BytesIO(data)
class Result(object):
    """Holds the captured result of an invoked CLI script."""

    def __init__(
        self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None
    ):
        #: The runner that created the result
        self.runner = runner
        #: The standard output as bytes.
        self.stdout_bytes = stdout_bytes
        #: The standard error as bytes, or None if not available
        self.stderr_bytes = stderr_bytes
        #: The exit code as integer.
        self.exit_code = exit_code
        #: The exception that happened if one did.
        self.exception = exception
        #: The traceback
        self.exc_info = exc_info

    def _decode(self, raw):
        # Decode captured bytes with the runner's charset, replacing
        # undecodable bytes, and normalize Windows line endings so tests
        # only need to compare against "\n".
        text = raw.decode(self.runner.charset, "replace")
        return text.replace("\r\n", "\n")

    @property
    def output(self):
        """The (standard) output as unicode string."""
        return self.stdout

    @property
    def stdout(self):
        """The standard output as unicode string."""
        return self._decode(self.stdout_bytes)

    @property
    def stderr(self):
        """The standard error as unicode string."""
        if self.stderr_bytes is None:
            raise ValueError("stderr not separately captured")
        return self._decode(self.stderr_bytes)

    def __repr__(self):
        status = repr(self.exception) if self.exception else "okay"
        return "<{} {}>".format(type(self).__name__, status)
class CliRunner(object):
    """The CLI runner provides functionality to invoke a Click command line
    script for unittesting purposes in an isolated environment.  This only
    works in single-threaded systems without any concurrency as it changes
    the global interpreter state (``sys.stdin``/``stdout``/``stderr``,
    ``os.environ`` and several Click internals).

    :param charset: the character set for the input and output data.  This
                    is UTF-8 by default and should not be changed currently
                    as the reporting to Click only works in Python 2
                    properly.
    :param env: a dictionary with environment variables for overriding.
    :param echo_stdin: if this is set to `True`, then reading from stdin
                       writes to stdout.  This is useful for showing
                       examples in some circumstances.  Note that regular
                       prompts will automatically echo the input.
    :param mix_stderr: if this is set to `False`, then stdout and stderr are
                       preserved as independent streams.  This is useful for
                       Unix-philosophy apps that have predictable stdout and
                       noisy stderr, such that each may be measured
                       independently
    """

    def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True):
        if charset is None:
            charset = "utf-8"
        self.charset = charset
        self.env = env or {}
        self.echo_stdin = echo_stdin
        self.mix_stderr = mix_stderr

    def get_default_prog_name(self, cli):
        """Given a command object it will return the default program name
        for it.  The default is the `name` attribute or ``"root"`` if not
        set.
        """
        return cli.name or "root"

    def make_env(self, overrides=None):
        """Returns the environment overrides for invoking a script."""
        rv = dict(self.env)
        if overrides:
            rv.update(overrides)
        return rv

    @contextlib.contextmanager
    def isolation(self, input=None, env=None, color=False):
        """A context manager that sets up the isolation for invoking of a
        command line tool.  This sets up stdin with the given input data
        and `os.environ` with the overrides from the given dictionary.
        This also rebinds some internals in Click to be mocked (like the
        prompt functionality).

        This is automatically done in the :meth:`invoke` method.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param input: the input stream to put into sys.stdin.
        :param env: the environment overrides as dictionary.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        input = make_input_stream(input, self.charset)
        # Save the real streams so they can be restored in the finally
        # block no matter what the invoked command does.
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        # Force a deterministic terminal width so help output wraps the
        # same way regardless of the real terminal size.
        old_forced_width = formatting.FORCED_WIDTH
        formatting.FORCED_WIDTH = 80
        env = self.make_env(env)
        if PY2:
            bytes_output = StringIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
            sys.stdout = bytes_output
            if not self.mix_stderr:
                bytes_error = StringIO()
                sys.stderr = bytes_error
        else:
            # On Python 3 the captured streams are binary buffers with a
            # text wrapper on top, mirroring real stdio.
            bytes_output = io.BytesIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
            input = io.TextIOWrapper(input, encoding=self.charset)
            sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset)
            if not self.mix_stderr:
                bytes_error = io.BytesIO()
                sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset)
        if self.mix_stderr:
            sys.stderr = sys.stdout
        sys.stdin = input

        def visible_input(prompt=None):
            # Replacement for the interactive prompt: read from the fake
            # stdin and echo the value so it appears in captured output.
            sys.stdout.write(prompt or "")
            val = input.readline().rstrip("\r\n")
            sys.stdout.write("{}\n".format(val))
            sys.stdout.flush()
            return val

        def hidden_input(prompt=None):
            # Hidden prompts never echo the value, only the prompt itself.
            sys.stdout.write("{}\n".format(prompt or ""))
            sys.stdout.flush()
            return input.readline().rstrip("\r\n")

        def _getchar(echo):
            # Single-character input comes from the fake stdin as well.
            char = sys.stdin.read(1)
            if echo:
                sys.stdout.write(char)
                sys.stdout.flush()
            return char

        default_color = color

        def should_strip_ansi(stream=None, color=None):
            # Strip ANSI codes unless color was requested (per-call color
            # overrides the default passed to isolation()).
            if color is None:
                return not default_color
            return not color

        # Save and monkeypatch Click's interactive helpers so prompts and
        # color handling go through the fakes above.
        old_visible_prompt_func = termui.visible_prompt_func
        old_hidden_prompt_func = termui.hidden_prompt_func
        old__getchar_func = termui._getchar
        old_should_strip_ansi = utils.should_strip_ansi
        termui.visible_prompt_func = visible_input
        termui.hidden_prompt_func = hidden_input
        termui._getchar = _getchar
        utils.should_strip_ansi = should_strip_ansi
        old_env = {}
        try:
            # Apply environment overrides, remembering previous values so
            # they can be restored afterwards.  A value of None removes the
            # variable entirely.
            for key, value in iteritems(env):
                old_env[key] = os.environ.get(key)
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            # Second tuple element is False when stderr is mixed into
            # stdout, otherwise the separate error buffer.
            yield (bytes_output, not self.mix_stderr and bytes_error)
        finally:
            # Restore the environment and all patched globals.
            for key, value in iteritems(old_env):
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            sys.stdin = old_stdin
            termui.visible_prompt_func = old_visible_prompt_func
            termui.hidden_prompt_func = old_hidden_prompt_func
            termui._getchar = old__getchar_func
            utils.should_strip_ansi = old_should_strip_ansi
            formatting.FORCED_WIDTH = old_forced_width

    def invoke(
        self,
        cli,
        args=None,
        input=None,
        env=None,
        catch_exceptions=True,
        color=False,
        **extra
    ):
        """Invokes a command in an isolated environment.  The arguments are
        forwarded directly to the command line script, the `extra` keyword
        arguments are passed to the :meth:`~clickpkg.Command.main` function
        of the command.

        This returns a :class:`Result` object.

        .. versionadded:: 3.0
           The ``catch_exceptions`` parameter was added.

        .. versionchanged:: 3.0
           The result object now has an `exc_info` attribute with the
           traceback if available.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param cli: the command to invoke
        :param args: the arguments to invoke.  It may be given as an
                     iterable or a string.  When given as string it will be
                     interpreted as a Unix shell command.  More details at
                     :func:`shlex.split`.
        :param input: the input data for `sys.stdin`.
        :param env: the environment overrides.
        :param catch_exceptions: Whether to catch any other exceptions than
                                 ``SystemExit``.
        :param extra: the keyword arguments to pass to :meth:`main`.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        exc_info = None
        with self.isolation(input=input, env=env, color=color) as outstreams:
            exception = None
            exit_code = 0
            if isinstance(args, string_types):
                args = shlex.split(args)
            try:
                prog_name = extra.pop("prog_name")
            except KeyError:
                prog_name = self.get_default_prog_name(cli)
            try:
                cli.main(args=args or (), prog_name=prog_name, **extra)
            except SystemExit as e:
                # SystemExit is the normal way a CLI terminates; capture
                # the code instead of letting it propagate.
                exc_info = sys.exc_info()
                exit_code = e.code
                if exit_code is None:
                    exit_code = 0
                if exit_code != 0:
                    exception = e
                if not isinstance(exit_code, int):
                    # sys.exit("message") convention: print the message and
                    # exit with status 1.
                    sys.stdout.write(str(exit_code))
                    sys.stdout.write("\n")
                    exit_code = 1
            except Exception as e:
                if not catch_exceptions:
                    raise
                exception = e
                exit_code = 1
                exc_info = sys.exc_info()
            finally:
                sys.stdout.flush()
                stdout = outstreams[0].getvalue()
                if self.mix_stderr:
                    stderr = None
                else:
                    stderr = outstreams[1].getvalue()
        return Result(
            runner=self,
            stdout_bytes=stdout,
            stderr_bytes=stderr,
            exit_code=exit_code,
            exception=exception,
            exc_info=exc_info,
        )

    @contextlib.contextmanager
    def isolated_filesystem(self):
        """A context manager that creates a temporary folder and changes
        the current working directory to it for isolated filesystem tests.
        """
        cwd = os.getcwd()
        t = tempfile.mkdtemp()
        os.chdir(t)
        try:
            yield t
        finally:
            os.chdir(cwd)
            try:
                shutil.rmtree(t)
            except (OSError, IOError):  # noqa: B014
                pass
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/click/decorators.py
|
import inspect
import sys
from functools import update_wrapper
from ._compat import iteritems
from ._unicodefun import _check_for_unicode_literals
from .core import Argument
from .core import Command
from .core import Group
from .core import Option
from .globals import get_current_context
from .utils import echo
def pass_context(f):
    """Marks a callback as wanting to receive the current context
    object as first argument.
    """

    def wrapper(*args, **kwargs):
        # Prepend the active Click context to the callback's arguments.
        ctx = get_current_context()
        return f(ctx, *args, **kwargs)

    return update_wrapper(wrapper, f)
def pass_obj(f):
    """Similar to :func:`pass_context`, but only pass the object on the
    context onwards (:attr:`Context.obj`).  This is useful if that object
    represents the state of a nested system.
    """

    def wrapper(*args, **kwargs):
        # Forward only the context's user object, not the context itself.
        obj = get_current_context().obj
        return f(obj, *args, **kwargs)

    return update_wrapper(wrapper, f)
def make_pass_decorator(object_type, ensure=False):
    """Given an object type this creates a decorator that will work
    similar to :func:`pass_obj` but instead of passing the object of the
    current context, it will find the innermost context of type
    :func:`object_type`.

    :param object_type: the type of the object to pass.
    :param ensure: if set to `True`, a new object will be created and
                   remembered on the context if it's not there yet.
    """

    def decorator(f):
        def wrapper(*args, **kwargs):
            ctx = get_current_context()
            # Either create-and-remember the object on the context or
            # search the context chain for an existing one.
            obj = (
                ctx.ensure_object(object_type)
                if ensure
                else ctx.find_object(object_type)
            )
            if obj is None:
                raise RuntimeError(
                    "Managed to invoke callback without a context"
                    " object of type '{}' existing".format(object_type.__name__)
                )
            return ctx.invoke(f, obj, *args, **kwargs)

        return update_wrapper(wrapper, f)

    return decorator
def _make_command(f, name, attrs, cls):
    """Turn the decorated callback *f* into an instance of *cls*.

    Parameters collected by @option/@argument decorators are attached in
    source order, and the help text is taken from ``attrs`` or derived
    from the callback's docstring.
    """
    if isinstance(f, Command):
        raise TypeError("Attempted to convert a callback into a command twice.")
    try:
        params = f.__click_params__
    except AttributeError:
        params = []
    else:
        # Decorators apply bottom-up, so reverse to restore source order;
        # the attribute is consumed so it isn't reused accidentally.
        params.reverse()
        del f.__click_params__
    help_text = attrs.get("help")
    if help_text is None:
        help_text = inspect.getdoc(f)
        if isinstance(help_text, bytes):
            help_text = help_text.decode("utf-8")
    else:
        help_text = inspect.cleandoc(help_text)
    attrs["help"] = help_text
    _check_for_unicode_literals()
    return cls(
        name=name or f.__name__.lower().replace("_", "-"),
        callback=f,
        params=params,
        **attrs
    )
def command(name=None, cls=None, **attrs):
    """Creates a new :class:`Command` and uses the decorated function as
    callback.  This will also automatically attach all decorated
    :func:`option` and :func:`argument` parameters to the command.

    The name of the command defaults to the name of the function with
    underscores replaced by dashes.  If you want to change that, you can
    pass the intended name as the first argument.

    All keyword arguments are forwarded to the underlying command class.

    Once decorated the function turns into a :class:`Command` instance
    that can be invoked as a command line utility or be attached to a
    command :class:`Group`.

    :param name: the name of the command.  This defaults to the function
                 name with underscores replaced by dashes.
    :param cls: the command class to instantiate.  This defaults to
                :class:`Command`.
    """
    command_cls = Command if cls is None else cls

    def decorator(f):
        cmd = _make_command(f, name, attrs, command_cls)
        # Preserve the callback's docstring on the command object.
        cmd.__doc__ = f.__doc__
        return cmd

    return decorator
def group(name=None, **attrs):
    """Creates a new :class:`Group` with a function as callback.  This
    works otherwise the same as :func:`command` just that the `cls`
    parameter is set to :class:`Group`.
    """
    # Only force the Group class when the caller didn't pick their own.
    if "cls" not in attrs:
        attrs["cls"] = Group
    return command(name, **attrs)
def _param_memo(f, param):
    """Record *param* on *f*: directly on ``params`` for Command objects,
    otherwise on a ``__click_params__`` list that :func:`_make_command`
    collects later.
    """
    if isinstance(f, Command):
        f.params.append(param)
        return
    if not hasattr(f, "__click_params__"):
        f.__click_params__ = []
    f.__click_params__.append(param)
def argument(*param_decls, **attrs):
    """Attaches an argument to the command.  All positional arguments are
    passed as parameter declarations to :class:`Argument`; all keyword
    arguments are forwarded unchanged (except ``cls``).

    This is equivalent to creating an :class:`Argument` instance manually
    and attaching it to the :attr:`Command.params` list.

    :param cls: the argument class to instantiate.  This defaults to
                :class:`Argument`.
    """

    def decorator(f):
        arg_cls = attrs.pop("cls", Argument)
        _param_memo(f, arg_cls(param_decls, **attrs))
        return f

    return decorator
def option(*param_decls, **attrs):
    """Attaches an option to the command.  All positional arguments are
    passed as parameter declarations to :class:`Option`; all keyword
    arguments are forwarded unchanged (except ``cls``).

    This is equivalent to creating an :class:`Option` instance manually
    and attaching it to the :attr:`Command.params` list.

    :param cls: the option class to instantiate.  This defaults to
                :class:`Option`.
    """

    def decorator(f):
        # Copy so pre-built attrs dicts can be reused with the same cls
        # across several decorated functions (issue #926).
        option_attrs = dict(attrs)
        if "help" in option_attrs:
            option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
        opt_cls = option_attrs.pop("cls", Option)
        _param_memo(f, opt_cls(param_decls, **option_attrs))
        return f

    return decorator
def confirmation_option(*param_decls, **attrs):
    """Shortcut for confirmation prompts that can be ignored by passing
    ``--yes`` as parameter.

    This is equivalent to decorating a function with :func:`option` with
    the following parameters::

        def callback(ctx, param, value):
            if not value:
                ctx.abort()

        @click.command()
        @click.option('--yes', is_flag=True, callback=callback,
                      expose_value=False, prompt='Do you want to continue?')
        def dropdb():
            pass
    """

    def decorator(f):
        def callback(ctx, param, value):
            # Abort unless the user confirmed (flag given or prompt answered).
            if not value:
                ctx.abort()

        defaults = {
            "is_flag": True,
            "callback": callback,
            "expose_value": False,
            "prompt": "Do you want to continue?",
            "help": "Confirm the action without prompting.",
        }
        for key, value in defaults.items():
            attrs.setdefault(key, value)
        return option(*(param_decls or ("--yes",)), **attrs)(f)

    return decorator
def password_option(*param_decls, **attrs):
    """Shortcut for password prompts.

    This is equivalent to decorating a function with :func:`option` with
    the following parameters::

        @click.command()
        @click.option('--password', prompt=True, confirmation_prompt=True,
                      hide_input=True)
        def changeadmin(password):
            pass
    """

    def decorator(f):
        # Prompt twice and never echo the typed value.
        for key, value in (
            ("prompt", True),
            ("confirmation_prompt", True),
            ("hide_input", True),
        ):
            attrs.setdefault(key, value)
        return option(*(param_decls or ("--password",)), **attrs)(f)

    return decorator
def version_option(version=None, *param_decls, **attrs):
    """Adds a ``--version`` option which immediately ends the program
    printing out the version number.  This is implemented as an eager
    option that prints the version and exits the program in the callback.

    :param version: the version number to show.  If not provided Click
                    attempts an auto discovery via setuptools.
    :param prog_name: the name of the program (defaults to autodetection)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        # Remember the *caller's* module name (one frame up) so the
        # callback can later match it against installed distributions'
        # console_scripts entry points to auto-detect the version.
        if hasattr(sys, "_getframe"):
            module = sys._getframe(1).f_globals.get("__name__")
        else:
            # No frame introspection available (non-CPython); autodetection
            # will fail with a RuntimeError in the callback if triggered.
            module = ""

    def decorator(f):
        prog_name = attrs.pop("prog_name", None)
        message = attrs.pop("message", "%(prog)s, version %(version)s")

        def callback(ctx, param, value):
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name
            if prog is None:
                prog = ctx.find_root().info_name
            ver = version
            if ver is None:
                # Auto-discover the version by scanning installed
                # distributions for a console_scripts entry point whose
                # module matches the decorating module.
                try:
                    import pkg_resources
                except ImportError:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get("console_scripts") or {}
                        for _, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError("Could not determine version")
            echo(message % {"prog": prog, "version": ver}, color=ctx.color)
            ctx.exit()

        attrs.setdefault("is_flag", True)
        attrs.setdefault("expose_value", False)
        attrs.setdefault("is_eager", True)
        attrs.setdefault("help", "Show the version and exit.")
        attrs["callback"] = callback
        return option(*(param_decls or ("--version",)), **attrs)(f)

    return decorator
def help_option(*param_decls, **attrs):
    """Adds a ``--help`` option which immediately ends the program
    printing out the help page.  This is usually unnecessary to add as
    this is added by default to all commands unless suppressed.

    Like :func:`version_option`, this is implemented as eager option that
    prints in the callback and exits.

    All arguments are forwarded to :func:`option`.
    """

    def decorator(f):
        def show_help(ctx, param, value):
            # Eager callback: print the help page and stop processing.
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()

        for key, value in (
            ("is_flag", True),
            ("expose_value", False),
            ("help", "Show this message and exit."),
            ("is_eager", True),
        ):
            attrs.setdefault(key, value)
        attrs["callback"] = show_help
        return option(*(param_decls or ("--help",)), **attrs)(f)

    return decorator
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/PKG-INFO
|
Metadata-Version: 2.1
Name: SQLAlchemy
Version: 1.3.18
Summary: Database Abstraction Library
Home-page: http://www.sqlalchemy.org
Author: Mike Bayer
Author-email: mike_mp@zzzcomputing.com
License: MIT
Project-URL: Documentation, https://docs.sqlalchemy.org
Project-URL: Issue Tracker, https://github.com/sqlalchemy/sqlalchemy/
Description: SQLAlchemy
==========
The Python SQL Toolkit and Object Relational Mapper
Introduction
-------------
SQLAlchemy is the Python SQL toolkit and Object Relational Mapper
that gives application developers the full power and
flexibility of SQL. SQLAlchemy provides a full suite
of well known enterprise-level persistence patterns,
designed for efficient and high-performing database
access, adapted into a simple and Pythonic domain
language.
Major SQLAlchemy features include:
* An industrial strength ORM, built
from the core on the identity map, unit of work,
and data mapper patterns. These patterns
allow transparent persistence of objects
using a declarative configuration system.
Domain models
can be constructed and manipulated naturally,
and changes are synchronized with the
current transaction automatically.
* A relationally-oriented query system, exposing
the full range of SQL's capabilities
explicitly, including joins, subqueries,
correlation, and most everything else,
in terms of the object model.
Writing queries with the ORM uses the same
techniques of relational composition you use
when writing SQL. While you can drop into
literal SQL at any time, it's virtually never
needed.
* A comprehensive and flexible system
of eager loading for related collections and objects.
Collections are cached within a session,
and can be loaded on individual access, all
at once using joins, or by query per collection
across the full result set.
* A Core SQL construction system and DBAPI
interaction layer. The SQLAlchemy Core is
separate from the ORM and is a full database
abstraction layer in its own right, and includes
an extensible Python-based SQL expression
language, schema metadata, connection pooling,
type coercion, and custom types.
* All primary and foreign key constraints are
assumed to be composite and natural. Surrogate
integer primary keys are of course still the
norm, but SQLAlchemy never assumes or hardcodes
to this model.
* Database introspection and generation. Database
schemas can be "reflected" in one step into
Python structures representing database metadata;
those same structures can then generate
CREATE statements right back out - all within
the Core, independent of the ORM.
SQLAlchemy's philosophy:
* SQL databases behave less and less like object
collections the more size and performance start to
matter; object collections behave less and less like
tables and rows the more abstraction starts to matter.
SQLAlchemy aims to accommodate both of these
principles.
* An ORM doesn't need to hide the "R". A relational
database provides rich, set-based functionality
that should be fully exposed. SQLAlchemy's
ORM provides an open-ended set of patterns
that allow a developer to construct a custom
mediation layer between a domain model and
a relational schema, turning the so-called
"object relational impedance" issue into
a distant memory.
* The developer, in all cases, makes all decisions
regarding the design, structure, and naming conventions
of both the object model as well as the relational
schema. SQLAlchemy only provides the means
to automate the execution of these decisions.
* With SQLAlchemy, there's no such thing as
"the ORM generated a bad query" - you
retain full control over the structure of
queries, including how joins are organized,
how subqueries and correlation is used, what
columns are requested. Everything SQLAlchemy
does is ultimately the result of a developer-
initiated decision.
* Don't use an ORM if the problem doesn't need one.
SQLAlchemy consists of a Core and separate ORM
component. The Core offers a full SQL expression
language that allows Pythonic construction
of SQL constructs that render directly to SQL
strings for a target database, returning
result sets that are essentially enhanced DBAPI
cursors.
* Transactions should be the norm. With SQLAlchemy's
ORM, nothing goes to permanent storage until
commit() is called. SQLAlchemy encourages applications
to create a consistent means of delineating
the start and end of a series of operations.
* Never render a literal value in a SQL statement.
Bound parameters are used to the greatest degree
possible, allowing query optimizers to cache
query plans effectively and making SQL injection
attacks a non-issue.
Documentation
-------------
Latest documentation is at:
http://www.sqlalchemy.org/docs/
Installation / Requirements
---------------------------
Full documentation for installation is at
`Installation <http://www.sqlalchemy.org/docs/intro.html#installation>`_.
Getting Help / Development / Bug reporting
------------------------------------------
Please refer to the `SQLAlchemy Community Guide <http://www.sqlalchemy.org/support.html>`_.
Code of Conduct
---------------
Above all, SQLAlchemy places great emphasis on polite, thoughtful, and
constructive communication between users and developers.
Please see our current Code of Conduct at
`Code of Conduct <http://www.sqlalchemy.org/codeofconduct.html>`_.
License
-------
SQLAlchemy is distributed under the `MIT license
<http://www.opensource.org/licenses/mit-license.php>`_.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Database :: Front-Ends
Classifier: Operating System :: OS Independent
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
Provides-Extra: mysql
Provides-Extra: pymysql
Provides-Extra: postgresql
Provides-Extra: postgresql_psycopg2binary
Provides-Extra: postgresql_pg8000
Provides-Extra: postgresql_psycopg2cffi
Provides-Extra: oracle
Provides-Extra: mssql_pyodbc
Provides-Extra: mssql_pymssql
Provides-Extra: mssql
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/installed-files.txt
|
..\sqlalchemy\__init__.py
..\sqlalchemy\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\__pycache__\events.cpython-39.pyc
..\sqlalchemy\__pycache__\exc.cpython-39.pyc
..\sqlalchemy\__pycache__\inspection.cpython-39.pyc
..\sqlalchemy\__pycache__\interfaces.cpython-39.pyc
..\sqlalchemy\__pycache__\log.cpython-39.pyc
..\sqlalchemy\__pycache__\processors.cpython-39.pyc
..\sqlalchemy\__pycache__\schema.cpython-39.pyc
..\sqlalchemy\__pycache__\types.cpython-39.pyc
..\sqlalchemy\connectors\__init__.py
..\sqlalchemy\connectors\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\connectors\__pycache__\mxodbc.cpython-39.pyc
..\sqlalchemy\connectors\__pycache__\pyodbc.cpython-39.pyc
..\sqlalchemy\connectors\__pycache__\zxJDBC.cpython-39.pyc
..\sqlalchemy\connectors\mxodbc.py
..\sqlalchemy\connectors\pyodbc.py
..\sqlalchemy\connectors\zxJDBC.py
..\sqlalchemy\databases\__init__.py
..\sqlalchemy\databases\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\__init__.py
..\sqlalchemy\dialects\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\firebird\__init__.py
..\sqlalchemy\dialects\firebird\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\firebird\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\firebird\__pycache__\fdb.cpython-39.pyc
..\sqlalchemy\dialects\firebird\__pycache__\kinterbasdb.cpython-39.pyc
..\sqlalchemy\dialects\firebird\base.py
..\sqlalchemy\dialects\firebird\fdb.py
..\sqlalchemy\dialects\firebird\kinterbasdb.py
..\sqlalchemy\dialects\mssql\__init__.py
..\sqlalchemy\dialects\mssql\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\adodbapi.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\information_schema.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\mxodbc.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\pymssql.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\pyodbc.cpython-39.pyc
..\sqlalchemy\dialects\mssql\__pycache__\zxjdbc.cpython-39.pyc
..\sqlalchemy\dialects\mssql\adodbapi.py
..\sqlalchemy\dialects\mssql\base.py
..\sqlalchemy\dialects\mssql\information_schema.py
..\sqlalchemy\dialects\mssql\mxodbc.py
..\sqlalchemy\dialects\mssql\provision.py
..\sqlalchemy\dialects\mssql\pymssql.py
..\sqlalchemy\dialects\mssql\pyodbc.py
..\sqlalchemy\dialects\mssql\zxjdbc.py
..\sqlalchemy\dialects\mysql\__init__.py
..\sqlalchemy\dialects\mysql\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\cymysql.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\dml.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\enumerated.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\gaerdbms.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\json.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\mysqlconnector.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\mysqldb.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\oursql.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\pymysql.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\pyodbc.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\reflection.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\types.cpython-39.pyc
..\sqlalchemy\dialects\mysql\__pycache__\zxjdbc.cpython-39.pyc
..\sqlalchemy\dialects\mysql\base.py
..\sqlalchemy\dialects\mysql\cymysql.py
..\sqlalchemy\dialects\mysql\dml.py
..\sqlalchemy\dialects\mysql\enumerated.py
..\sqlalchemy\dialects\mysql\gaerdbms.py
..\sqlalchemy\dialects\mysql\json.py
..\sqlalchemy\dialects\mysql\mysqlconnector.py
..\sqlalchemy\dialects\mysql\mysqldb.py
..\sqlalchemy\dialects\mysql\oursql.py
..\sqlalchemy\dialects\mysql\provision.py
..\sqlalchemy\dialects\mysql\pymysql.py
..\sqlalchemy\dialects\mysql\pyodbc.py
..\sqlalchemy\dialects\mysql\reflection.py
..\sqlalchemy\dialects\mysql\types.py
..\sqlalchemy\dialects\mysql\zxjdbc.py
..\sqlalchemy\dialects\oracle\__init__.py
..\sqlalchemy\dialects\oracle\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\oracle\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\oracle\__pycache__\cx_oracle.cpython-39.pyc
..\sqlalchemy\dialects\oracle\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\dialects\oracle\__pycache__\zxjdbc.cpython-39.pyc
..\sqlalchemy\dialects\oracle\base.py
..\sqlalchemy\dialects\oracle\cx_oracle.py
..\sqlalchemy\dialects\oracle\provision.py
..\sqlalchemy\dialects\oracle\zxjdbc.py
..\sqlalchemy\dialects\postgresql\__init__.py
..\sqlalchemy\dialects\postgresql\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\array.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\dml.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\ext.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\hstore.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\json.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\pg8000.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\psycopg2.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\psycopg2cffi.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\pygresql.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\pypostgresql.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\ranges.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\__pycache__\zxjdbc.cpython-39.pyc
..\sqlalchemy\dialects\postgresql\array.py
..\sqlalchemy\dialects\postgresql\base.py
..\sqlalchemy\dialects\postgresql\dml.py
..\sqlalchemy\dialects\postgresql\ext.py
..\sqlalchemy\dialects\postgresql\hstore.py
..\sqlalchemy\dialects\postgresql\json.py
..\sqlalchemy\dialects\postgresql\pg8000.py
..\sqlalchemy\dialects\postgresql\provision.py
..\sqlalchemy\dialects\postgresql\psycopg2.py
..\sqlalchemy\dialects\postgresql\psycopg2cffi.py
..\sqlalchemy\dialects\postgresql\pygresql.py
..\sqlalchemy\dialects\postgresql\pypostgresql.py
..\sqlalchemy\dialects\postgresql\ranges.py
..\sqlalchemy\dialects\postgresql\zxjdbc.py
..\sqlalchemy\dialects\sqlite\__init__.py
..\sqlalchemy\dialects\sqlite\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\__pycache__\json.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\__pycache__\pysqlcipher.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\__pycache__\pysqlite.cpython-39.pyc
..\sqlalchemy\dialects\sqlite\base.py
..\sqlalchemy\dialects\sqlite\json.py
..\sqlalchemy\dialects\sqlite\provision.py
..\sqlalchemy\dialects\sqlite\pysqlcipher.py
..\sqlalchemy\dialects\sqlite\pysqlite.py
..\sqlalchemy\dialects\sybase\__init__.py
..\sqlalchemy\dialects\sybase\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\dialects\sybase\__pycache__\base.cpython-39.pyc
..\sqlalchemy\dialects\sybase\__pycache__\mxodbc.cpython-39.pyc
..\sqlalchemy\dialects\sybase\__pycache__\pyodbc.cpython-39.pyc
..\sqlalchemy\dialects\sybase\__pycache__\pysybase.cpython-39.pyc
..\sqlalchemy\dialects\sybase\base.py
..\sqlalchemy\dialects\sybase\mxodbc.py
..\sqlalchemy\dialects\sybase\pyodbc.py
..\sqlalchemy\dialects\sybase\pysybase.py
..\sqlalchemy\engine\__init__.py
..\sqlalchemy\engine\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\base.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\default.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\interfaces.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\reflection.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\result.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\strategies.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\threadlocal.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\url.cpython-39.pyc
..\sqlalchemy\engine\__pycache__\util.cpython-39.pyc
..\sqlalchemy\engine\base.py
..\sqlalchemy\engine\default.py
..\sqlalchemy\engine\interfaces.py
..\sqlalchemy\engine\reflection.py
..\sqlalchemy\engine\result.py
..\sqlalchemy\engine\strategies.py
..\sqlalchemy\engine\threadlocal.py
..\sqlalchemy\engine\url.py
..\sqlalchemy\engine\util.py
..\sqlalchemy\event\__init__.py
..\sqlalchemy\event\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\event\__pycache__\api.cpython-39.pyc
..\sqlalchemy\event\__pycache__\attr.cpython-39.pyc
..\sqlalchemy\event\__pycache__\base.cpython-39.pyc
..\sqlalchemy\event\__pycache__\legacy.cpython-39.pyc
..\sqlalchemy\event\__pycache__\registry.cpython-39.pyc
..\sqlalchemy\event\api.py
..\sqlalchemy\event\attr.py
..\sqlalchemy\event\base.py
..\sqlalchemy\event\legacy.py
..\sqlalchemy\event\registry.py
..\sqlalchemy\events.py
..\sqlalchemy\exc.py
..\sqlalchemy\ext\__init__.py
..\sqlalchemy\ext\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\associationproxy.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\automap.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\baked.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\compiler.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\horizontal_shard.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\hybrid.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\indexable.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\instrumentation.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\mutable.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\orderinglist.cpython-39.pyc
..\sqlalchemy\ext\__pycache__\serializer.cpython-39.pyc
..\sqlalchemy\ext\associationproxy.py
..\sqlalchemy\ext\automap.py
..\sqlalchemy\ext\baked.py
..\sqlalchemy\ext\compiler.py
..\sqlalchemy\ext\declarative\__init__.py
..\sqlalchemy\ext\declarative\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\ext\declarative\__pycache__\api.cpython-39.pyc
..\sqlalchemy\ext\declarative\__pycache__\base.cpython-39.pyc
..\sqlalchemy\ext\declarative\__pycache__\clsregistry.cpython-39.pyc
..\sqlalchemy\ext\declarative\api.py
..\sqlalchemy\ext\declarative\base.py
..\sqlalchemy\ext\declarative\clsregistry.py
..\sqlalchemy\ext\horizontal_shard.py
..\sqlalchemy\ext\hybrid.py
..\sqlalchemy\ext\indexable.py
..\sqlalchemy\ext\instrumentation.py
..\sqlalchemy\ext\mutable.py
..\sqlalchemy\ext\orderinglist.py
..\sqlalchemy\ext\serializer.py
..\sqlalchemy\inspection.py
..\sqlalchemy\interfaces.py
..\sqlalchemy\log.py
..\sqlalchemy\orm\__init__.py
..\sqlalchemy\orm\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\attributes.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\base.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\collections.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\dependency.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\deprecated_interfaces.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\descriptor_props.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\dynamic.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\evaluator.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\events.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\exc.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\identity.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\instrumentation.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\interfaces.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\loading.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\mapper.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\path_registry.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\persistence.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\properties.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\query.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\relationships.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\scoping.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\session.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\state.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\strategies.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\strategy_options.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\sync.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\unitofwork.cpython-39.pyc
..\sqlalchemy\orm\__pycache__\util.cpython-39.pyc
..\sqlalchemy\orm\attributes.py
..\sqlalchemy\orm\base.py
..\sqlalchemy\orm\collections.py
..\sqlalchemy\orm\dependency.py
..\sqlalchemy\orm\deprecated_interfaces.py
..\sqlalchemy\orm\descriptor_props.py
..\sqlalchemy\orm\dynamic.py
..\sqlalchemy\orm\evaluator.py
..\sqlalchemy\orm\events.py
..\sqlalchemy\orm\exc.py
..\sqlalchemy\orm\identity.py
..\sqlalchemy\orm\instrumentation.py
..\sqlalchemy\orm\interfaces.py
..\sqlalchemy\orm\loading.py
..\sqlalchemy\orm\mapper.py
..\sqlalchemy\orm\path_registry.py
..\sqlalchemy\orm\persistence.py
..\sqlalchemy\orm\properties.py
..\sqlalchemy\orm\query.py
..\sqlalchemy\orm\relationships.py
..\sqlalchemy\orm\scoping.py
..\sqlalchemy\orm\session.py
..\sqlalchemy\orm\state.py
..\sqlalchemy\orm\strategies.py
..\sqlalchemy\orm\strategy_options.py
..\sqlalchemy\orm\sync.py
..\sqlalchemy\orm\unitofwork.py
..\sqlalchemy\orm\util.py
..\sqlalchemy\pool\__init__.py
..\sqlalchemy\pool\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\pool\__pycache__\base.cpython-39.pyc
..\sqlalchemy\pool\__pycache__\dbapi_proxy.cpython-39.pyc
..\sqlalchemy\pool\__pycache__\impl.cpython-39.pyc
..\sqlalchemy\pool\base.py
..\sqlalchemy\pool\dbapi_proxy.py
..\sqlalchemy\pool\impl.py
..\sqlalchemy\processors.py
..\sqlalchemy\schema.py
..\sqlalchemy\sql\__init__.py
..\sqlalchemy\sql\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\annotation.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\base.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\compiler.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\crud.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\ddl.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\default_comparator.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\dml.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\elements.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\expression.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\functions.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\naming.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\operators.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\schema.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\selectable.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\sqltypes.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\type_api.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\util.cpython-39.pyc
..\sqlalchemy\sql\__pycache__\visitors.cpython-39.pyc
..\sqlalchemy\sql\annotation.py
..\sqlalchemy\sql\base.py
..\sqlalchemy\sql\compiler.py
..\sqlalchemy\sql\crud.py
..\sqlalchemy\sql\ddl.py
..\sqlalchemy\sql\default_comparator.py
..\sqlalchemy\sql\dml.py
..\sqlalchemy\sql\elements.py
..\sqlalchemy\sql\expression.py
..\sqlalchemy\sql\functions.py
..\sqlalchemy\sql\naming.py
..\sqlalchemy\sql\operators.py
..\sqlalchemy\sql\schema.py
..\sqlalchemy\sql\selectable.py
..\sqlalchemy\sql\sqltypes.py
..\sqlalchemy\sql\type_api.py
..\sqlalchemy\sql\util.py
..\sqlalchemy\sql\visitors.py
..\sqlalchemy\testing\__init__.py
..\sqlalchemy\testing\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\assertions.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\assertsql.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\config.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\engines.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\entities.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\exclusions.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\fixtures.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\mock.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\pickleable.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\profiling.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\provision.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\replay_fixture.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\requirements.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\schema.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\util.cpython-39.pyc
..\sqlalchemy\testing\__pycache__\warnings.cpython-39.pyc
..\sqlalchemy\testing\assertions.py
..\sqlalchemy\testing\assertsql.py
..\sqlalchemy\testing\config.py
..\sqlalchemy\testing\engines.py
..\sqlalchemy\testing\entities.py
..\sqlalchemy\testing\exclusions.py
..\sqlalchemy\testing\fixtures.py
..\sqlalchemy\testing\mock.py
..\sqlalchemy\testing\pickleable.py
..\sqlalchemy\testing\plugin\__init__.py
..\sqlalchemy\testing\plugin\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\testing\plugin\__pycache__\bootstrap.cpython-39.pyc
..\sqlalchemy\testing\plugin\__pycache__\plugin_base.cpython-39.pyc
..\sqlalchemy\testing\plugin\__pycache__\pytestplugin.cpython-39.pyc
..\sqlalchemy\testing\plugin\bootstrap.py
..\sqlalchemy\testing\plugin\plugin_base.py
..\sqlalchemy\testing\plugin\pytestplugin.py
..\sqlalchemy\testing\profiling.py
..\sqlalchemy\testing\provision.py
..\sqlalchemy\testing\replay_fixture.py
..\sqlalchemy\testing\requirements.py
..\sqlalchemy\testing\schema.py
..\sqlalchemy\testing\suite\__init__.py
..\sqlalchemy\testing\suite\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_cte.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_ddl.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_dialect.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_insert.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_reflection.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_results.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_select.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_sequence.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_types.cpython-39.pyc
..\sqlalchemy\testing\suite\__pycache__\test_update_delete.cpython-39.pyc
..\sqlalchemy\testing\suite\test_cte.py
..\sqlalchemy\testing\suite\test_ddl.py
..\sqlalchemy\testing\suite\test_dialect.py
..\sqlalchemy\testing\suite\test_insert.py
..\sqlalchemy\testing\suite\test_reflection.py
..\sqlalchemy\testing\suite\test_results.py
..\sqlalchemy\testing\suite\test_select.py
..\sqlalchemy\testing\suite\test_sequence.py
..\sqlalchemy\testing\suite\test_types.py
..\sqlalchemy\testing\suite\test_update_delete.py
..\sqlalchemy\testing\util.py
..\sqlalchemy\testing\warnings.py
..\sqlalchemy\types.py
..\sqlalchemy\util\__init__.py
..\sqlalchemy\util\__pycache__\__init__.cpython-39.pyc
..\sqlalchemy\util\__pycache__\_collections.cpython-39.pyc
..\sqlalchemy\util\__pycache__\compat.cpython-39.pyc
..\sqlalchemy\util\__pycache__\deprecations.cpython-39.pyc
..\sqlalchemy\util\__pycache__\langhelpers.cpython-39.pyc
..\sqlalchemy\util\__pycache__\queue.cpython-39.pyc
..\sqlalchemy\util\__pycache__\topological.cpython-39.pyc
..\sqlalchemy\util\_collections.py
..\sqlalchemy\util\compat.py
..\sqlalchemy\util\deprecations.py
..\sqlalchemy\util\langhelpers.py
..\sqlalchemy\util\queue.py
..\sqlalchemy\util\topological.py
PKG-INFO
SOURCES.txt
dependency_links.txt
requires.txt
top_level.txt
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/SOURCES.txt
|
AUTHORS
CHANGES
LICENSE
MANIFEST.in
README.dialects.rst
README.rst
README.unittests.rst
setup.cfg
setup.py
tox.ini
doc/contents.html
doc/copyright.html
doc/errors.html
doc/genindex.html
doc/glossary.html
doc/index.html
doc/intro.html
doc/notfound.html
doc/search.html
doc/searchindex.js
doc/_images/sqla_arch_small.png
doc/_images/sqla_engine_arch.png
doc/_static/basic.css
doc/_static/changelog.css
doc/_static/detectmobile.js
doc/_static/docs.css
doc/_static/doctools.js
doc/_static/documentation_options.js
doc/_static/dragons.png
doc/_static/file.png
doc/_static/init.js
doc/_static/jquery-3.5.1.js
doc/_static/jquery.js
doc/_static/language_data.js
doc/_static/minus.png
doc/_static/plus.png
doc/_static/pygments.css
doc/_static/searchtools.js
doc/_static/sphinx_paramlinks.css
doc/_static/underscore-1.3.1.js
doc/_static/underscore.js
doc/build/Makefile
doc/build/conf.py
doc/build/contents.rst
doc/build/copyright.rst
doc/build/errors.rst
doc/build/glossary.rst
doc/build/index.rst
doc/build/intro.rst
doc/build/requirements.txt
doc/build/sqla_arch_small.png
doc/build/changelog/README.txt
doc/build/changelog/changelog_01.rst
doc/build/changelog/changelog_02.rst
doc/build/changelog/changelog_03.rst
doc/build/changelog/changelog_04.rst
doc/build/changelog/changelog_05.rst
doc/build/changelog/changelog_06.rst
doc/build/changelog/changelog_07.rst
doc/build/changelog/changelog_08.rst
doc/build/changelog/changelog_09.rst
doc/build/changelog/changelog_10.rst
doc/build/changelog/changelog_11.rst
doc/build/changelog/changelog_12.rst
doc/build/changelog/changelog_13.rst
doc/build/changelog/index.rst
doc/build/changelog/migration_04.rst
doc/build/changelog/migration_05.rst
doc/build/changelog/migration_06.rst
doc/build/changelog/migration_07.rst
doc/build/changelog/migration_08.rst
doc/build/changelog/migration_09.rst
doc/build/changelog/migration_10.rst
doc/build/changelog/migration_11.rst
doc/build/changelog/migration_12.rst
doc/build/changelog/migration_13.rst
doc/build/changelog/unreleased_11/README.txt
doc/build/changelog/unreleased_12/README.txt
doc/build/changelog/unreleased_13/README.txt
doc/build/core/api_basics.rst
doc/build/core/compiler.rst
doc/build/core/connections.rst
doc/build/core/constraints.rst
doc/build/core/custom_types.rst
doc/build/core/ddl.rst
doc/build/core/defaults.rst
doc/build/core/dml.rst
doc/build/core/engines.rst
doc/build/core/engines_connections.rst
doc/build/core/event.rst
doc/build/core/events.rst
doc/build/core/exceptions.rst
doc/build/core/expression_api.rst
doc/build/core/functions.rst
doc/build/core/index.rst
doc/build/core/inspection.rst
doc/build/core/interfaces.rst
doc/build/core/internals.rst
doc/build/core/metadata.rst
doc/build/core/pooling.rst
doc/build/core/reflection.rst
doc/build/core/schema.rst
doc/build/core/selectable.rst
doc/build/core/serializer.rst
doc/build/core/sqla_engine_arch.png
doc/build/core/sqlelement.rst
doc/build/core/tutorial.rst
doc/build/core/type_api.rst
doc/build/core/type_basics.rst
doc/build/core/types.rst
doc/build/core/visitors.rst
doc/build/dialects/firebird.rst
doc/build/dialects/index.rst
doc/build/dialects/mssql.rst
doc/build/dialects/mysql.rst
doc/build/dialects/oracle.rst
doc/build/dialects/postgresql.rst
doc/build/dialects/sqlite.rst
doc/build/dialects/sybase.rst
doc/build/faq/connections.rst
doc/build/faq/index.rst
doc/build/faq/metadata_schema.rst
doc/build/faq/ormconfiguration.rst
doc/build/faq/performance.rst
doc/build/faq/sessions.rst
doc/build/faq/sqlexpressions.rst
doc/build/orm/backref.rst
doc/build/orm/basic_relationships.rst
doc/build/orm/cascades.rst
doc/build/orm/classical.rst
doc/build/orm/collections.rst
doc/build/orm/composites.rst
doc/build/orm/constructors.rst
doc/build/orm/contextual.rst
doc/build/orm/deprecated.rst
doc/build/orm/events.rst
doc/build/orm/examples.rst
doc/build/orm/exceptions.rst
doc/build/orm/extending.rst
doc/build/orm/index.rst
doc/build/orm/inheritance.rst
doc/build/orm/inheritance_loading.rst
doc/build/orm/internals.rst
doc/build/orm/join_conditions.rst
doc/build/orm/loading.rst
doc/build/orm/loading_columns.rst
doc/build/orm/loading_objects.rst
doc/build/orm/loading_relationships.rst
doc/build/orm/mapped_attributes.rst
doc/build/orm/mapped_sql_expr.rst
doc/build/orm/mapper_config.rst
doc/build/orm/mapping_api.rst
doc/build/orm/mapping_columns.rst
doc/build/orm/mapping_styles.rst
doc/build/orm/nonstandard_mappings.rst
doc/build/orm/persistence_techniques.rst
doc/build/orm/query.rst
doc/build/orm/relationship_api.rst
doc/build/orm/relationship_persistence.rst
doc/build/orm/relationships.rst
doc/build/orm/scalar_mapping.rst
doc/build/orm/self_referential.rst
doc/build/orm/session.rst
doc/build/orm/session_api.rst
doc/build/orm/session_basics.rst
doc/build/orm/session_events.rst
doc/build/orm/session_state_management.rst
doc/build/orm/session_transaction.rst
doc/build/orm/tutorial.rst
doc/build/orm/versioning.rst
doc/build/orm/extensions/associationproxy.rst
doc/build/orm/extensions/automap.rst
doc/build/orm/extensions/baked.rst
doc/build/orm/extensions/horizontal_shard.rst
doc/build/orm/extensions/hybrid.rst
doc/build/orm/extensions/index.rst
doc/build/orm/extensions/indexable.rst
doc/build/orm/extensions/instrumentation.rst
doc/build/orm/extensions/mutable.rst
doc/build/orm/extensions/orderinglist.rst
doc/build/orm/extensions/declarative/api.rst
doc/build/orm/extensions/declarative/basic_use.rst
doc/build/orm/extensions/declarative/index.rst
doc/build/orm/extensions/declarative/inheritance.rst
doc/build/orm/extensions/declarative/mixins.rst
doc/build/orm/extensions/declarative/relationships.rst
doc/build/orm/extensions/declarative/table_config.rst
doc/build/texinputs/Makefile
doc/build/texinputs/sphinx.sty
doc/changelog/changelog_01.html
doc/changelog/changelog_02.html
doc/changelog/changelog_03.html
doc/changelog/changelog_04.html
doc/changelog/changelog_05.html
doc/changelog/changelog_06.html
doc/changelog/changelog_07.html
doc/changelog/changelog_08.html
doc/changelog/changelog_09.html
doc/changelog/changelog_10.html
doc/changelog/changelog_11.html
doc/changelog/changelog_12.html
doc/changelog/changelog_13.html
doc/changelog/index.html
doc/changelog/migration_04.html
doc/changelog/migration_05.html
doc/changelog/migration_06.html
doc/changelog/migration_07.html
doc/changelog/migration_08.html
doc/changelog/migration_09.html
doc/changelog/migration_10.html
doc/changelog/migration_11.html
doc/changelog/migration_12.html
doc/changelog/migration_13.html
doc/core/api_basics.html
doc/core/compiler.html
doc/core/connections.html
doc/core/constraints.html
doc/core/custom_types.html
doc/core/ddl.html
doc/core/defaults.html
doc/core/dml.html
doc/core/engines.html
doc/core/engines_connections.html
doc/core/event.html
doc/core/events.html
doc/core/exceptions.html
doc/core/expression_api.html
doc/core/functions.html
doc/core/index.html
doc/core/inspection.html
doc/core/interfaces.html
doc/core/internals.html
doc/core/metadata.html
doc/core/pooling.html
doc/core/reflection.html
doc/core/schema.html
doc/core/selectable.html
doc/core/serializer.html
doc/core/sqlelement.html
doc/core/tutorial.html
doc/core/type_api.html
doc/core/type_basics.html
doc/core/types.html
doc/core/visitors.html
doc/dialects/firebird.html
doc/dialects/index.html
doc/dialects/mssql.html
doc/dialects/mysql.html
doc/dialects/oracle.html
doc/dialects/postgresql.html
doc/dialects/sqlite.html
doc/dialects/sybase.html
doc/faq/connections.html
doc/faq/index.html
doc/faq/metadata_schema.html
doc/faq/ormconfiguration.html
doc/faq/performance.html
doc/faq/sessions.html
doc/faq/sqlexpressions.html
doc/orm/backref.html
doc/orm/basic_relationships.html
doc/orm/cascades.html
doc/orm/classical.html
doc/orm/collections.html
doc/orm/composites.html
doc/orm/constructors.html
doc/orm/contextual.html
doc/orm/deprecated.html
doc/orm/events.html
doc/orm/examples.html
doc/orm/exceptions.html
doc/orm/extending.html
doc/orm/index.html
doc/orm/inheritance.html
doc/orm/inheritance_loading.html
doc/orm/internals.html
doc/orm/join_conditions.html
doc/orm/loading.html
doc/orm/loading_columns.html
doc/orm/loading_objects.html
doc/orm/loading_relationships.html
doc/orm/mapped_attributes.html
doc/orm/mapped_sql_expr.html
doc/orm/mapper_config.html
doc/orm/mapping_api.html
doc/orm/mapping_columns.html
doc/orm/mapping_styles.html
doc/orm/nonstandard_mappings.html
doc/orm/persistence_techniques.html
doc/orm/query.html
doc/orm/relationship_api.html
doc/orm/relationship_persistence.html
doc/orm/relationships.html
doc/orm/scalar_mapping.html
doc/orm/self_referential.html
doc/orm/session.html
doc/orm/session_api.html
doc/orm/session_basics.html
doc/orm/session_events.html
doc/orm/session_state_management.html
doc/orm/session_transaction.html
doc/orm/tutorial.html
doc/orm/versioning.html
doc/orm/extensions/associationproxy.html
doc/orm/extensions/automap.html
doc/orm/extensions/baked.html
doc/orm/extensions/horizontal_shard.html
doc/orm/extensions/hybrid.html
doc/orm/extensions/index.html
doc/orm/extensions/indexable.html
doc/orm/extensions/instrumentation.html
doc/orm/extensions/mutable.html
doc/orm/extensions/orderinglist.html
doc/orm/extensions/declarative/api.html
doc/orm/extensions/declarative/basic_use.html
doc/orm/extensions/declarative/index.html
doc/orm/extensions/declarative/inheritance.html
doc/orm/extensions/declarative/mixins.html
doc/orm/extensions/declarative/relationships.html
doc/orm/extensions/declarative/table_config.html
examples/__init__.py
examples/adjacency_list/__init__.py
examples/adjacency_list/adjacency_list.py
examples/association/__init__.py
examples/association/basic_association.py
examples/association/dict_of_sets_with_default.py
examples/association/proxied_association.py
examples/custom_attributes/__init__.py
examples/custom_attributes/active_column_defaults.py
examples/custom_attributes/custom_management.py
examples/custom_attributes/listen_for_events.py
examples/dogpile_caching/__init__.py
examples/dogpile_caching/advanced.py
examples/dogpile_caching/caching_query.py
examples/dogpile_caching/environment.py
examples/dogpile_caching/fixture_data.py
examples/dogpile_caching/helloworld.py
examples/dogpile_caching/local_session_caching.py
examples/dogpile_caching/model.py
examples/dogpile_caching/relationship_caching.py
examples/dynamic_dict/__init__.py
examples/dynamic_dict/dynamic_dict.py
examples/elementtree/__init__.py
examples/elementtree/adjacency_list.py
examples/elementtree/optimized_al.py
examples/elementtree/pickle_type.py
examples/elementtree/test.xml
examples/elementtree/test2.xml
examples/elementtree/test3.xml
examples/generic_associations/__init__.py
examples/generic_associations/discriminator_on_association.py
examples/generic_associations/generic_fk.py
examples/generic_associations/table_per_association.py
examples/generic_associations/table_per_related.py
examples/graphs/__init__.py
examples/graphs/directed_graph.py
examples/inheritance/__init__.py
examples/inheritance/concrete.py
examples/inheritance/joined.py
examples/inheritance/single.py
examples/join_conditions/__init__.py
examples/join_conditions/cast.py
examples/join_conditions/threeway.py
examples/large_collection/__init__.py
examples/large_collection/large_collection.py
examples/materialized_paths/__init__.py
examples/materialized_paths/materialized_paths.py
examples/nested_sets/__init__.py
examples/nested_sets/nested_sets.py
examples/performance/__init__.py
examples/performance/__main__.py
examples/performance/bulk_inserts.py
examples/performance/bulk_updates.py
examples/performance/large_resultsets.py
examples/performance/short_selects.py
examples/performance/single_inserts.py
examples/postgis/__init__.py
examples/postgis/postgis.py
examples/sharding/__init__.py
examples/sharding/attribute_shard.py
examples/space_invaders/__init__.py
examples/space_invaders/space_invaders.py
examples/versioned_history/__init__.py
examples/versioned_history/history_meta.py
examples/versioned_history/test_versioning.py
examples/versioned_rows/__init__.py
examples/versioned_rows/versioned_map.py
examples/versioned_rows/versioned_rows.py
examples/versioned_rows/versioned_rows_w_versionid.py
examples/versioned_rows/versioned_update_old_row.py
examples/vertical/__init__.py
examples/vertical/dictlike-polymorphic.py
examples/vertical/dictlike.py
lib/SQLAlchemy.egg-info/PKG-INFO
lib/SQLAlchemy.egg-info/SOURCES.txt
lib/SQLAlchemy.egg-info/dependency_links.txt
lib/SQLAlchemy.egg-info/requires.txt
lib/SQLAlchemy.egg-info/top_level.txt
lib/sqlalchemy/__init__.py
lib/sqlalchemy/events.py
lib/sqlalchemy/exc.py
lib/sqlalchemy/inspection.py
lib/sqlalchemy/interfaces.py
lib/sqlalchemy/log.py
lib/sqlalchemy/processors.py
lib/sqlalchemy/schema.py
lib/sqlalchemy/types.py
lib/sqlalchemy/cextension/processors.c
lib/sqlalchemy/cextension/resultproxy.c
lib/sqlalchemy/cextension/utils.c
lib/sqlalchemy/connectors/__init__.py
lib/sqlalchemy/connectors/mxodbc.py
lib/sqlalchemy/connectors/pyodbc.py
lib/sqlalchemy/connectors/zxJDBC.py
lib/sqlalchemy/databases/__init__.py
lib/sqlalchemy/dialects/__init__.py
lib/sqlalchemy/dialects/type_migration_guidelines.txt
lib/sqlalchemy/dialects/firebird/__init__.py
lib/sqlalchemy/dialects/firebird/base.py
lib/sqlalchemy/dialects/firebird/fdb.py
lib/sqlalchemy/dialects/firebird/kinterbasdb.py
lib/sqlalchemy/dialects/mssql/__init__.py
lib/sqlalchemy/dialects/mssql/adodbapi.py
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/information_schema.py
lib/sqlalchemy/dialects/mssql/mxodbc.py
lib/sqlalchemy/dialects/mssql/provision.py
lib/sqlalchemy/dialects/mssql/pymssql.py
lib/sqlalchemy/dialects/mssql/pyodbc.py
lib/sqlalchemy/dialects/mssql/zxjdbc.py
lib/sqlalchemy/dialects/mysql/__init__.py
lib/sqlalchemy/dialects/mysql/base.py
lib/sqlalchemy/dialects/mysql/cymysql.py
lib/sqlalchemy/dialects/mysql/dml.py
lib/sqlalchemy/dialects/mysql/enumerated.py
lib/sqlalchemy/dialects/mysql/gaerdbms.py
lib/sqlalchemy/dialects/mysql/json.py
lib/sqlalchemy/dialects/mysql/mysqlconnector.py
lib/sqlalchemy/dialects/mysql/mysqldb.py
lib/sqlalchemy/dialects/mysql/oursql.py
lib/sqlalchemy/dialects/mysql/provision.py
lib/sqlalchemy/dialects/mysql/pymysql.py
lib/sqlalchemy/dialects/mysql/pyodbc.py
lib/sqlalchemy/dialects/mysql/reflection.py
lib/sqlalchemy/dialects/mysql/types.py
lib/sqlalchemy/dialects/mysql/zxjdbc.py
lib/sqlalchemy/dialects/oracle/__init__.py
lib/sqlalchemy/dialects/oracle/base.py
lib/sqlalchemy/dialects/oracle/cx_oracle.py
lib/sqlalchemy/dialects/oracle/provision.py
lib/sqlalchemy/dialects/oracle/zxjdbc.py
lib/sqlalchemy/dialects/postgresql/__init__.py
lib/sqlalchemy/dialects/postgresql/array.py
lib/sqlalchemy/dialects/postgresql/base.py
lib/sqlalchemy/dialects/postgresql/dml.py
lib/sqlalchemy/dialects/postgresql/ext.py
lib/sqlalchemy/dialects/postgresql/hstore.py
lib/sqlalchemy/dialects/postgresql/json.py
lib/sqlalchemy/dialects/postgresql/pg8000.py
lib/sqlalchemy/dialects/postgresql/provision.py
lib/sqlalchemy/dialects/postgresql/psycopg2.py
lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
lib/sqlalchemy/dialects/postgresql/pygresql.py
lib/sqlalchemy/dialects/postgresql/pypostgresql.py
lib/sqlalchemy/dialects/postgresql/ranges.py
lib/sqlalchemy/dialects/postgresql/zxjdbc.py
lib/sqlalchemy/dialects/sqlite/__init__.py
lib/sqlalchemy/dialects/sqlite/base.py
lib/sqlalchemy/dialects/sqlite/json.py
lib/sqlalchemy/dialects/sqlite/provision.py
lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
lib/sqlalchemy/dialects/sqlite/pysqlite.py
lib/sqlalchemy/dialects/sybase/__init__.py
lib/sqlalchemy/dialects/sybase/base.py
lib/sqlalchemy/dialects/sybase/mxodbc.py
lib/sqlalchemy/dialects/sybase/pyodbc.py
lib/sqlalchemy/dialects/sybase/pysybase.py
lib/sqlalchemy/engine/__init__.py
lib/sqlalchemy/engine/base.py
lib/sqlalchemy/engine/default.py
lib/sqlalchemy/engine/interfaces.py
lib/sqlalchemy/engine/reflection.py
lib/sqlalchemy/engine/result.py
lib/sqlalchemy/engine/strategies.py
lib/sqlalchemy/engine/threadlocal.py
lib/sqlalchemy/engine/url.py
lib/sqlalchemy/engine/util.py
lib/sqlalchemy/event/__init__.py
lib/sqlalchemy/event/api.py
lib/sqlalchemy/event/attr.py
lib/sqlalchemy/event/base.py
lib/sqlalchemy/event/legacy.py
lib/sqlalchemy/event/registry.py
lib/sqlalchemy/ext/__init__.py
lib/sqlalchemy/ext/associationproxy.py
lib/sqlalchemy/ext/automap.py
lib/sqlalchemy/ext/baked.py
lib/sqlalchemy/ext/compiler.py
lib/sqlalchemy/ext/horizontal_shard.py
lib/sqlalchemy/ext/hybrid.py
lib/sqlalchemy/ext/indexable.py
lib/sqlalchemy/ext/instrumentation.py
lib/sqlalchemy/ext/mutable.py
lib/sqlalchemy/ext/orderinglist.py
lib/sqlalchemy/ext/serializer.py
lib/sqlalchemy/ext/declarative/__init__.py
lib/sqlalchemy/ext/declarative/api.py
lib/sqlalchemy/ext/declarative/base.py
lib/sqlalchemy/ext/declarative/clsregistry.py
lib/sqlalchemy/orm/__init__.py
lib/sqlalchemy/orm/attributes.py
lib/sqlalchemy/orm/base.py
lib/sqlalchemy/orm/collections.py
lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/orm/deprecated_interfaces.py
lib/sqlalchemy/orm/descriptor_props.py
lib/sqlalchemy/orm/dynamic.py
lib/sqlalchemy/orm/evaluator.py
lib/sqlalchemy/orm/events.py
lib/sqlalchemy/orm/exc.py
lib/sqlalchemy/orm/identity.py
lib/sqlalchemy/orm/instrumentation.py
lib/sqlalchemy/orm/interfaces.py
lib/sqlalchemy/orm/loading.py
lib/sqlalchemy/orm/mapper.py
lib/sqlalchemy/orm/path_registry.py
lib/sqlalchemy/orm/persistence.py
lib/sqlalchemy/orm/properties.py
lib/sqlalchemy/orm/query.py
lib/sqlalchemy/orm/relationships.py
lib/sqlalchemy/orm/scoping.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/orm/state.py
lib/sqlalchemy/orm/strategies.py
lib/sqlalchemy/orm/strategy_options.py
lib/sqlalchemy/orm/sync.py
lib/sqlalchemy/orm/unitofwork.py
lib/sqlalchemy/orm/util.py
lib/sqlalchemy/pool/__init__.py
lib/sqlalchemy/pool/base.py
lib/sqlalchemy/pool/dbapi_proxy.py
lib/sqlalchemy/pool/impl.py
lib/sqlalchemy/sql/__init__.py
lib/sqlalchemy/sql/annotation.py
lib/sqlalchemy/sql/base.py
lib/sqlalchemy/sql/compiler.py
lib/sqlalchemy/sql/crud.py
lib/sqlalchemy/sql/ddl.py
lib/sqlalchemy/sql/default_comparator.py
lib/sqlalchemy/sql/dml.py
lib/sqlalchemy/sql/elements.py
lib/sqlalchemy/sql/expression.py
lib/sqlalchemy/sql/functions.py
lib/sqlalchemy/sql/naming.py
lib/sqlalchemy/sql/operators.py
lib/sqlalchemy/sql/schema.py
lib/sqlalchemy/sql/selectable.py
lib/sqlalchemy/sql/sqltypes.py
lib/sqlalchemy/sql/type_api.py
lib/sqlalchemy/sql/util.py
lib/sqlalchemy/sql/visitors.py
lib/sqlalchemy/testing/__init__.py
lib/sqlalchemy/testing/assertions.py
lib/sqlalchemy/testing/assertsql.py
lib/sqlalchemy/testing/config.py
lib/sqlalchemy/testing/engines.py
lib/sqlalchemy/testing/entities.py
lib/sqlalchemy/testing/exclusions.py
lib/sqlalchemy/testing/fixtures.py
lib/sqlalchemy/testing/mock.py
lib/sqlalchemy/testing/pickleable.py
lib/sqlalchemy/testing/profiling.py
lib/sqlalchemy/testing/provision.py
lib/sqlalchemy/testing/replay_fixture.py
lib/sqlalchemy/testing/requirements.py
lib/sqlalchemy/testing/schema.py
lib/sqlalchemy/testing/util.py
lib/sqlalchemy/testing/warnings.py
lib/sqlalchemy/testing/plugin/__init__.py
lib/sqlalchemy/testing/plugin/bootstrap.py
lib/sqlalchemy/testing/plugin/plugin_base.py
lib/sqlalchemy/testing/plugin/pytestplugin.py
lib/sqlalchemy/testing/suite/__init__.py
lib/sqlalchemy/testing/suite/test_cte.py
lib/sqlalchemy/testing/suite/test_ddl.py
lib/sqlalchemy/testing/suite/test_dialect.py
lib/sqlalchemy/testing/suite/test_insert.py
lib/sqlalchemy/testing/suite/test_reflection.py
lib/sqlalchemy/testing/suite/test_results.py
lib/sqlalchemy/testing/suite/test_select.py
lib/sqlalchemy/testing/suite/test_sequence.py
lib/sqlalchemy/testing/suite/test_types.py
lib/sqlalchemy/testing/suite/test_update_delete.py
lib/sqlalchemy/util/__init__.py
lib/sqlalchemy/util/_collections.py
lib/sqlalchemy/util/compat.py
lib/sqlalchemy/util/deprecations.py
lib/sqlalchemy/util/langhelpers.py
lib/sqlalchemy/util/queue.py
lib/sqlalchemy/util/topological.py
test/__init__.py
test/binary_data_one.dat
test/binary_data_two.dat
test/conftest.py
test/requirements.py
test/aaa_profiling/__init__.py
test/aaa_profiling/test_compiler.py
test/aaa_profiling/test_memusage.py
test/aaa_profiling/test_misc.py
test/aaa_profiling/test_orm.py
test/aaa_profiling/test_pool.py
test/aaa_profiling/test_resultset.py
test/aaa_profiling/test_zoomark.py
test/aaa_profiling/test_zoomark_orm.py
test/base/__init__.py
test/base/test_dependency.py
test/base/test_events.py
test/base/test_except.py
test/base/test_inspect.py
test/base/test_tutorials.py
test/base/test_utils.py
test/base/test_warnings.py
test/dialect/__init__.py
test/dialect/test_all.py
test/dialect/test_firebird.py
test/dialect/test_mxodbc.py
test/dialect/test_pyodbc.py
test/dialect/test_sqlite.py
test/dialect/test_suite.py
test/dialect/test_sybase.py
test/dialect/mssql/__init__.py
test/dialect/mssql/test_compiler.py
test/dialect/mssql/test_engine.py
test/dialect/mssql/test_query.py
test/dialect/mssql/test_reflection.py
test/dialect/mssql/test_types.py
test/dialect/mysql/__init__.py
test/dialect/mysql/test_compiler.py
test/dialect/mysql/test_dialect.py
test/dialect/mysql/test_for_update.py
test/dialect/mysql/test_on_duplicate.py
test/dialect/mysql/test_query.py
test/dialect/mysql/test_reflection.py
test/dialect/mysql/test_types.py
test/dialect/oracle/__init__.py
test/dialect/oracle/test_compiler.py
test/dialect/oracle/test_dialect.py
test/dialect/oracle/test_reflection.py
test/dialect/oracle/test_types.py
test/dialect/postgresql/__init__.py
test/dialect/postgresql/test_compiler.py
test/dialect/postgresql/test_dialect.py
test/dialect/postgresql/test_on_conflict.py
test/dialect/postgresql/test_query.py
test/dialect/postgresql/test_reflection.py
test/dialect/postgresql/test_types.py
test/engine/__init__.py
test/engine/test_bind.py
test/engine/test_ddlevents.py
test/engine/test_deprecations.py
test/engine/test_execute.py
test/engine/test_logging.py
test/engine/test_parseconnect.py
test/engine/test_pool.py
test/engine/test_processors.py
test/engine/test_reconnect.py
test/engine/test_reflection.py
test/engine/test_transaction.py
test/ext/__init__.py
test/ext/test_associationproxy.py
test/ext/test_automap.py
test/ext/test_baked.py
test/ext/test_compiler.py
test/ext/test_deprecations.py
test/ext/test_extendedattr.py
test/ext/test_horizontal_shard.py
test/ext/test_hybrid.py
test/ext/test_indexable.py
test/ext/test_mutable.py
test/ext/test_orderinglist.py
test/ext/test_serializer.py
test/ext/declarative/__init__.py
test/ext/declarative/test_basic.py
test/ext/declarative/test_clsregistry.py
test/ext/declarative/test_concurrency.py
test/ext/declarative/test_inheritance.py
test/ext/declarative/test_mixin.py
test/ext/declarative/test_reflection.py
test/orm/__init__.py
test/orm/_fixtures.py
test/orm/test_ac_relationships.py
test/orm/test_association.py
test/orm/test_assorted_eager.py
test/orm/test_attributes.py
test/orm/test_backref_mutations.py
test/orm/test_bind.py
test/orm/test_bulk.py
test/orm/test_bundle.py
test/orm/test_cascade.py
test/orm/test_collection.py
test/orm/test_compile.py
test/orm/test_composites.py
test/orm/test_cycles.py
test/orm/test_default_strategies.py
test/orm/test_defaults.py
test/orm/test_deferred.py
test/orm/test_deprecations.py
test/orm/test_descriptor.py
test/orm/test_dynamic.py
test/orm/test_eager_relations.py
test/orm/test_evaluator.py
test/orm/test_events.py
test/orm/test_expire.py
test/orm/test_froms.py
test/orm/test_generative.py
test/orm/test_hasparent.py
test/orm/test_immediate_load.py
test/orm/test_inspect.py
test/orm/test_instrumentation.py
test/orm/test_joins.py
test/orm/test_lazy_relations.py
test/orm/test_load_on_fks.py
test/orm/test_loading.py
test/orm/test_lockmode.py
test/orm/test_manytomany.py
test/orm/test_mapper.py
test/orm/test_merge.py
test/orm/test_naturalpks.py
test/orm/test_of_type.py
test/orm/test_onetoone.py
test/orm/test_options.py
test/orm/test_pickled.py
test/orm/test_query.py
test/orm/test_rel_fn.py
test/orm/test_relationships.py
test/orm/test_scoping.py
test/orm/test_selectable.py
test/orm/test_selectin_relations.py
test/orm/test_session.py
test/orm/test_subquery_relations.py
test/orm/test_sync.py
test/orm/test_transaction.py
test/orm/test_unitofwork.py
test/orm/test_unitofworkv2.py
test/orm/test_update_delete.py
test/orm/test_utils.py
test/orm/test_validators.py
test/orm/test_versioning.py
test/orm/inheritance/__init__.py
test/orm/inheritance/_poly_fixtures.py
test/orm/inheritance/test_abc_inheritance.py
test/orm/inheritance/test_abc_polymorphic.py
test/orm/inheritance/test_assorted_poly.py
test/orm/inheritance/test_basic.py
test/orm/inheritance/test_concrete.py
test/orm/inheritance/test_magazine.py
test/orm/inheritance/test_manytomany.py
test/orm/inheritance/test_poly_linked_list.py
test/orm/inheritance/test_poly_loading.py
test/orm/inheritance/test_poly_persistence.py
test/orm/inheritance/test_polymorphic_rel.py
test/orm/inheritance/test_productspec.py
test/orm/inheritance/test_relationship.py
test/orm/inheritance/test_selects.py
test/orm/inheritance/test_single.py
test/orm/inheritance/test_with_poly.py
test/perf/invalidate_stresstest.py
test/perf/orm2010.py
test/sql/__init__.py
test/sql/test_case_statement.py
test/sql/test_compiler.py
test/sql/test_computed.py
test/sql/test_constraints.py
test/sql/test_cte.py
test/sql/test_ddlemit.py
test/sql/test_defaults.py
test/sql/test_delete.py
test/sql/test_deprecations.py
test/sql/test_functions.py
test/sql/test_generative.py
test/sql/test_insert.py
test/sql/test_insert_exec.py
test/sql/test_inspect.py
test/sql/test_join_rewriting.py
test/sql/test_labels.py
test/sql/test_lateral.py
test/sql/test_metadata.py
test/sql/test_operators.py
test/sql/test_query.py
test/sql/test_quote.py
test/sql/test_resultset.py
test/sql/test_returning.py
test/sql/test_rowcount.py
test/sql/test_selectable.py
test/sql/test_sequences.py
test/sql/test_tablesample.py
test/sql/test_text.py
test/sql/test_type_expressions.py
test/sql/test_types.py
test/sql/test_unicode.py
test/sql/test_update.py
test/sql/test_utils.py
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/requires.txt
|
[mssql]
pyodbc
[mssql_pymssql]
pymssql
[mssql_pyodbc]
pyodbc
[mysql]
mysqlclient
[oracle]
cx_oracle
[postgresql]
psycopg2
[postgresql_pg8000]
pg8000
[postgresql_psycopg2binary]
psycopg2-binary
[postgresql_psycopg2cffi]
psycopg2cffi
[pymysql]
pymysql
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/top_level.txt
|
sqlalchemy
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/SQLAlchemy-1.3.18-py3.9.egg-info/dependency_links.txt
| 0 |
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/RECORD
|
Werkzeug-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Werkzeug-1.0.1.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
Werkzeug-1.0.1.dist-info/METADATA,sha256=d0zmVNa4UC2-nAo2A8_81oiy123D6JTGRSuY_Ymgyt4,4730
Werkzeug-1.0.1.dist-info/RECORD,,
Werkzeug-1.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
Werkzeug-1.0.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
Werkzeug-1.0.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9
werkzeug/__init__.py,sha256=rb-yPiXOjTLbtDOl5fQp5hN7oBdaoXAoQ-slAAvfZAo,502
werkzeug/__pycache__/__init__.cpython-39.pyc,,
werkzeug/__pycache__/_compat.cpython-39.pyc,,
werkzeug/__pycache__/_internal.cpython-39.pyc,,
werkzeug/__pycache__/_reloader.cpython-39.pyc,,
werkzeug/__pycache__/datastructures.cpython-39.pyc,,
werkzeug/__pycache__/exceptions.cpython-39.pyc,,
werkzeug/__pycache__/filesystem.cpython-39.pyc,,
werkzeug/__pycache__/formparser.cpython-39.pyc,,
werkzeug/__pycache__/http.cpython-39.pyc,,
werkzeug/__pycache__/local.cpython-39.pyc,,
werkzeug/__pycache__/posixemulation.cpython-39.pyc,,
werkzeug/__pycache__/routing.cpython-39.pyc,,
werkzeug/__pycache__/security.cpython-39.pyc,,
werkzeug/__pycache__/serving.cpython-39.pyc,,
werkzeug/__pycache__/test.cpython-39.pyc,,
werkzeug/__pycache__/testapp.cpython-39.pyc,,
werkzeug/__pycache__/urls.cpython-39.pyc,,
werkzeug/__pycache__/useragents.cpython-39.pyc,,
werkzeug/__pycache__/utils.cpython-39.pyc,,
werkzeug/__pycache__/wsgi.cpython-39.pyc,,
werkzeug/_compat.py,sha256=zjufTNrhQ8BgYSGSh-sVu6iW3r3O9WzjE9j-qJobx-g,6671
werkzeug/_internal.py,sha256=d_4AqheyS6dHMViwdc0drFrjs67ZzT6Ej2gWf-Z-Iys,14351
werkzeug/_reloader.py,sha256=I3mg3oRQ0lLzl06oEoVopN3bN7CtINuuUQdqDcmTnEs,11531
werkzeug/datastructures.py,sha256=AonxOcwU0TPMEzfKF1368ySULxHgxE-JE-DEAGdo2ts,100480
werkzeug/debug/__init__.py,sha256=3RtUMc5Y9hYyK11ugHltgkQ9Dt-ViR945Vy_X5NV7zU,17289
werkzeug/debug/__pycache__/__init__.cpython-39.pyc,,
werkzeug/debug/__pycache__/console.cpython-39.pyc,,
werkzeug/debug/__pycache__/repr.cpython-39.pyc,,
werkzeug/debug/__pycache__/tbtools.cpython-39.pyc,,
werkzeug/debug/console.py,sha256=OATaO7KHYMqpbzIFe1HeW9Mnl3wZgA3jMQoGDPn5URc,5488
werkzeug/debug/repr.py,sha256=lIwuhbyrMwVe3P_cFqNyqzHL7P93TLKod7lw9clydEw,9621
werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673
werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507
werkzeug/debug/shared/debugger.js,sha256=rOhqZMRfpZnnu6_XCGn6wMWPhtfwRAcyZKksdIxPJas,6400
werkzeug/debug/shared/jquery.js,sha256=CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo,88145
werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191
werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200
werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818
werkzeug/debug/shared/style.css,sha256=gZ9uhmb5zj3XLuT9RvnMp6jMINgQ-VVBCp-2AZbG3YQ,6604
werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220
werkzeug/debug/tbtools.py,sha256=2iJ8RURUZUSbopOIehy53LnVJWx47lsHN2V2l6hc7Wc,20363
werkzeug/exceptions.py,sha256=UTYSDkmAsH-vt8VSidlEffwqBVNXuT7bRg-_NqgUe8A,25188
werkzeug/filesystem.py,sha256=HzKl-j0Hd8Jl66j778UbPTAYNnY6vUZgYLlBZ0e7uw0,2101
werkzeug/formparser.py,sha256=Sto0jZid9im9ZVIf56vilCdyX-arK33wSftkYsLCnzo,21788
werkzeug/http.py,sha256=KVRV3yFK14PJeI56qClEq4qxFdvKUQVy4C_dwuWz9_Q,43107
werkzeug/local.py,sha256=_Tk7gB238pPWUU7habxFkZF02fiCMRVW6d62YWL1Rh0,14371
werkzeug/middleware/__init__.py,sha256=f1SFZo67IlW4k1uqKzNHxYQlsakUS-D6KK_j0e3jjwQ,549
werkzeug/middleware/__pycache__/__init__.cpython-39.pyc,,
werkzeug/middleware/__pycache__/dispatcher.cpython-39.pyc,,
werkzeug/middleware/__pycache__/http_proxy.cpython-39.pyc,,
werkzeug/middleware/__pycache__/lint.cpython-39.pyc,,
werkzeug/middleware/__pycache__/profiler.cpython-39.pyc,,
werkzeug/middleware/__pycache__/proxy_fix.cpython-39.pyc,,
werkzeug/middleware/__pycache__/shared_data.cpython-39.pyc,,
werkzeug/middleware/dispatcher.py,sha256=_-KoMzHtcISHS7ouWKAOraqlCLprdh83YOAn_8DjLp8,2240
werkzeug/middleware/http_proxy.py,sha256=lRjTdMmghHiZuZrS7_UJ3gZc-vlFizhBbFZ-XZPLwIA,7117
werkzeug/middleware/lint.py,sha256=ItTwuWJnflF8xMT1uqU_Ty1ryhux-CjeUfskqaUpxsw,12967
werkzeug/middleware/profiler.py,sha256=8B_s23d6BGrU_q54gJsm6kcCbOJbTSqrXCsioHON0Xs,4471
werkzeug/middleware/proxy_fix.py,sha256=K5oZ3DPXOzdZi0Xba5zW7ClPOxgUuqXHQHvY2-AWCGw,6431
werkzeug/middleware/shared_data.py,sha256=sPSRTKqtKSVBUyN8fr6jOJbdq9cdOLu6pg3gz4Y_1Xo,9599
werkzeug/posixemulation.py,sha256=gSSiv1SCmOyzOM_nq1ZaZCtxP__C5MeDJl_4yXJmi4Q,3541
werkzeug/routing.py,sha256=6-iZ7CKeUILYAehoKXLbmi5E6LgLbwuzUh8TNplnf5Q,79019
werkzeug/security.py,sha256=81149MplFq7-hD4RK4sKp9kzXXejjV9D4lWBzaRyeQ8,8106
werkzeug/serving.py,sha256=YvTqvurA-Mnj8mkqRe2kBdVr2ap4ibCq1ByQjOA6g1w,38694
werkzeug/test.py,sha256=GJ9kxTMSJ-nB7kfGtxuROr9JGmXxDRev-2U1SkeUJGE,39564
werkzeug/testapp.py,sha256=bHekqMsqRfVxwgFbvOMem-DYa_sdB7R47yUXpt1RUTo,9329
werkzeug/urls.py,sha256=T8-hV_1vwhu6xhX93FwsHteK-W-kIE2orj5WoMf-WFw,39322
werkzeug/useragents.py,sha256=TSoGv5IOvP375eK5gLLpsLQCeUgTR6sO1WftmAP_YvM,5563
werkzeug/utils.py,sha256=hrVK4u_wi8z9viBO9bgOLlm1aaIvCpn-p2d1FeZQDEo,25251
werkzeug/wrappers/__init__.py,sha256=S4VioKAmF_av9Ec9zQvG71X1EOkYfPx1TYck9jyDiyY,1384
werkzeug/wrappers/__pycache__/__init__.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/accept.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/auth.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/base_request.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/base_response.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/common_descriptors.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/cors.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/etag.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/json.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/request.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/response.cpython-39.pyc,,
werkzeug/wrappers/__pycache__/user_agent.cpython-39.pyc,,
werkzeug/wrappers/accept.py,sha256=TIvjUc0g73fhTWX54wg_D9NNzKvpnG1X8u1w26tK1o8,1760
werkzeug/wrappers/auth.py,sha256=Pmn6iaGHBrUyHbJpW0lZhO_q9RVoAa5QalaTqcavdAI,1158
werkzeug/wrappers/base_request.py,sha256=4TuGlKWeKQdlq4eU94hJYcXSfWo8Rk7CS1Ef5lJ3ZM0,26012
werkzeug/wrappers/base_response.py,sha256=JTxJZ8o-IBetpoWJqt2HFwPaNWNDAlM3_GXJe1Whw80,27784
werkzeug/wrappers/common_descriptors.py,sha256=X2Ktd5zUWsmcd4ciaF62Dd8Lru9pLGP_XDUNukc8cXs,12829
werkzeug/wrappers/cors.py,sha256=XMbaCol4dWTGvb-dCJBoN0p3JX91v93AIAHd7tnB3L4,3466
werkzeug/wrappers/etag.py,sha256=XMXtyfByBsOjxwaX8U7ZtUY7JXkbQLP45oXZ0qkyTNs,12217
werkzeug/wrappers/json.py,sha256=HvK_A4NpO0sLqgb10sTJcoZydYOwyNiPCJPV7SVgcgE,4343
werkzeug/wrappers/request.py,sha256=QbHGqDpGPN684pnOPEokwkPESfm-NnfYM7ydOMxW_NI,1514
werkzeug/wrappers/response.py,sha256=Oqv8TMG_dnOKTq_V30ddgkO5B7IJhkVPODvm7cbhZ3c,2524
werkzeug/wrappers/user_agent.py,sha256=YJb-vr12cujG7sQMG9V89VsJa-03SWSenhg1W4cT0EY,435
werkzeug/wsgi.py,sha256=ZGk85NzRyQTzkYis-xl8V9ydJgfClBdStvhzDzER2mw,34367
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/WHEEL
|
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/LICENSE.rst
|
Copyright 2007 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/top_level.txt
|
werkzeug
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/INSTALLER
|
pip
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Werkzeug-1.0.1.dist-info/METADATA
|
Metadata-Version: 2.1
Name: Werkzeug
Version: 1.0.1
Summary: The comprehensive WSGI web application library.
Home-page: https://palletsprojects.com/p/werkzeug/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: Pallets
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://werkzeug.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/werkzeug
Project-URL: Issue tracker, https://github.com/pallets/werkzeug/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware
Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
Description-Content-Type: text/x-rst
Provides-Extra: dev
Requires-Dist: pytest ; extra == 'dev'
Requires-Dist: pytest-timeout ; extra == 'dev'
Requires-Dist: coverage ; extra == 'dev'
Requires-Dist: tox ; extra == 'dev'
Requires-Dist: sphinx ; extra == 'dev'
Requires-Dist: pallets-sphinx-themes ; extra == 'dev'
Requires-Dist: sphinx-issues ; extra == 'dev'
Provides-Extra: watchdog
Requires-Dist: watchdog ; extra == 'watchdog'
Werkzeug
========
*werkzeug* German noun: "tool". Etymology: *werk* ("work"), *zeug* ("stuff")
Werkzeug is a comprehensive `WSGI`_ web application library. It began as
a simple collection of various utilities for WSGI applications and has
become one of the most advanced WSGI utility libraries.
It includes:
- An interactive debugger that allows inspecting stack traces and
source code in the browser with an interactive interpreter for any
frame in the stack.
- A full-featured request object with objects to interact with
headers, query args, form data, files, and cookies.
- A response object that can wrap other WSGI applications and handle
streaming data.
- A routing system for matching URLs to endpoints and generating URLs
for endpoints, with an extensible system for capturing variables
from URLs.
- HTTP utilities to handle entity tags, cache control, dates, user
agents, cookies, files, and more.
- A threaded WSGI server for use while developing applications
locally.
- A test client for simulating HTTP requests during testing without
requiring running a server.
Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up
to the developer to choose a template engine, database adapter, and even
how to handle requests. It can be used to build all sorts of end user
applications such as blogs, wikis, or bulletin boards.
`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while
providing more structure and patterns for defining powerful
applications.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U Werkzeug
A Simple Example
----------------
.. code-block:: python
from werkzeug.wrappers import Request, Response
@Request.application
def application(request):
return Response('Hello, World!')
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 4000, application)
Links
-----
- Website: https://palletsprojects.com/p/werkzeug/
- Documentation: https://werkzeug.palletsprojects.com/
- Releases: https://pypi.org/project/Werkzeug/
- Code: https://github.com/pallets/werkzeug
- Issue tracker: https://github.com/pallets/werkzeug/issues
- Test status: https://dev.azure.com/pallets/werkzeug/_build
- Official chat: https://discord.gg/t6rrQZH
.. _WSGI: https://wsgi.readthedocs.io/en/latest/
.. _Flask: https://www.palletsprojects.com/p/flask/
.. _pip: https://pip.pypa.io/en/stable/quickstart/
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/tzfile.py
|
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
    """Build a tzinfo instance for *zone* from the open tzfile(5) stream *fp*.

    Returns an instance of a dynamically created class: a ``StaticTzInfo``
    subclass when the zone has a single constant offset, otherwise a
    ``DstTzInfo`` subclass carrying the full transition history.
    """
    # Fixed-size header: magic, version byte, 15 reserved bytes, six counts.
    head_fmt = '>4s c 15x 6l'
    head_size = calcsize(head_fmt)
    (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
        typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
    # Make sure it is a tzfile(5) file
    assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)

    # Read out the transition times, localtime indices and ttinfo structures.
    data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
        timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
    data_size = calcsize(data_fmt)
    data = unpack(data_fmt, fp.read(data_size))

    # make sure we unpacked the right number of values
    assert len(data) == 2 * timecnt + 3 * typecnt + 1
    transitions = [memorized_datetime(trans)
                   for trans in data[:timecnt]]
    lindexes = list(data[timecnt:2 * timecnt])
    ttinfo_raw = data[2 * timecnt:-1]
    tznames_raw = data[-1]
    del data

    # Process ttinfo into separate structs: (utcoffset_seconds, isdst, tzname)
    ttinfo = []
    tznames = {}
    i = 0
    while i < len(ttinfo_raw):
        # have we looked up this timezone name yet?
        tzname_offset = ttinfo_raw[i + 2]
        if tzname_offset not in tznames:
            # tznames_raw is a NUL-separated pool of abbreviations; slice
            # from the offset up to the next NUL (or the end of the pool).
            nul = tznames_raw.find(_NULL, tzname_offset)
            if nul < 0:
                nul = len(tznames_raw)
            tznames[tzname_offset] = _std_string(
                tznames_raw[tzname_offset:nul])
        ttinfo.append((ttinfo_raw[i],
                       bool(ttinfo_raw[i + 1]),
                       tznames[tzname_offset]))
        i += 3

    # Now build the timezone object
    if len(ttinfo) == 1 or len(transitions) == 0:
        # NOTE(review): bare tuple expression — a no-op except that it raises
        # IndexError early if the single ttinfo entry is malformed.
        ttinfo[0][0], ttinfo[0][2]
        cls = type(zone, (StaticTzInfo,), dict(
            zone=zone,
            _utcoffset=memorized_timedelta(ttinfo[0][0]),
            _tzname=ttinfo[0][2]))
    else:
        # Early dates use the first standard time ttinfo
        i = 0
        while ttinfo[i][1]:
            i += 1
        if ttinfo[i] == ttinfo[lindexes[0]]:
            transitions[0] = datetime.min
        else:
            transitions.insert(0, datetime.min)
            lindexes.insert(0, i)

        # calculate transition info
        transition_info = []
        for i in range(len(transitions)):
            inf = ttinfo[lindexes[i]]
            utcoffset = inf[0]
            if not inf[1]:
                dst = 0
            else:
                # Walk backwards to the most recent standard-time entry;
                # the DST offset is the difference from that entry.
                # (The i == 0 entry was forced to standard time above, so
                # prev_inf is presumably always bound here — verify against
                # upstream pytz if modifying.)
                for j in range(i - 1, -1, -1):
                    prev_inf = ttinfo[lindexes[j]]
                    if not prev_inf[1]:
                        break
                dst = inf[0] - prev_inf[0]  # dst offset

                # Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
                if dst <= 0 or dst > 3600 * 3:
                    for j in range(i + 1, len(transitions)):
                        stdinf = ttinfo[lindexes[j]]
                        if not stdinf[1]:
                            dst = inf[0] - stdinf[0]
                            if dst > 0:
                                break  # Found a useful std time.

            tzname = inf[2]

            # Round utcoffset and dst to the nearest minute or the
            # datetime library will complain. Conversions to these timezones
            # might be up to plus or minus 30 seconds out, but it is
            # the best we can do.
            utcoffset = int((utcoffset + 30) // 60) * 60
            dst = int((dst + 30) // 60) * 60
            transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))

        cls = type(zone, (DstTzInfo,), dict(
            zone=zone,
            _utc_transition_times=transitions,
            _transition_info=transition_info))

    return cls()
if __name__ == '__main__':
    # Ad-hoc smoke test: parse two bundled zoneinfo files and dump the
    # transition times of the last one. Note the first `tz` binding is
    # immediately overwritten, and the file handles are never closed —
    # acceptable for a throwaway script entry point.
    import os.path
    from pprint import pprint
    base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    tz = build_tzinfo('Australia/Melbourne',
                      open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
    tz = build_tzinfo('US/Eastern',
                      open(os.path.join(base, 'US', 'Eastern'), 'rb'))
    pprint(tz._utc_transition_times)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/tzinfo.py
|
'''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
# Cache mapping seconds -> timedelta so equal offsets share one object.
_timedelta_cache = {}


def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    cached = _timedelta_cache.get(seconds)
    if cached is None:
        cached = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = cached
    return cached
# Unix epoch as a naive datetime; seed the cache with it for seconds == 0.
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}


def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    cached = _datetime_cache.get(seconds)
    if cached is None:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096)
        cached = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = cached
    return cached
# Cache mapping raw (utcoffset, dst, tzname) tuples to their interned form.
_ttinfo_cache = {}


def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    if args not in _ttinfo_cache:
        _ttinfo_cache[args] = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2]
        )
    return _ttinfo_cache[args]


# Shared zero-offset timedelta, used as the DST offset of static zones.
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    """Common base for pytz tzinfo classes; stringifies to the zone name."""

    # Concrete values are supplied by subclasses.
    _utcoffset = None
    _tzname = None
    zone = None

    def __str__(self):
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone whose offset from UTC never changes.

    Zones like this are rare: most locations have altered their offset
    at least once in their history.
    '''

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        shifted = dt + self._utcoffset
        return shifted.replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        # A static zone never observes daylight saving time.
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        Normally a no-op: static zones have no ambiguous wall-clock
        times to repair. When *dt* carries a different tzinfo, the
        value is converted into this zone via astimezone(); a naive
        *dt* is rejected.
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight saving time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
# Sorted list of DST transition times, UTC
_utc_transition_times = None
# [(utcoffset, dstoffset, tzname)] corresponding to
# _utc_transition_times entries
_transition_info = None
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = (
self._transition_info[0])
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if (dt.tzinfo is not None and
getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
is_dst is used to determine the correct timezone in the ambigous
period at the end of daylight saving time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight saving time
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight saving time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6),
is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
# is an ambiguous case occuring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
# Filter out the possiblilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
# If we get this far, we have in a wierd timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone if is_dst=True
# Choose the latest (by UTC) applicable timezone if is_dst=False
# i.e., behave like end-of-DST transition
dates = {} # utc -> local
for local_dt in filtered_possible_loc_dt:
utc_time = (
local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset)
assert utc_time not in dates
dates[utc_time] = local_dt
return dates[[min, max][not is_dst](dates)]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.utcoffset(ambiguous, is_dst=False))
'-1 day, 20:30:00'
>>> str(tz.utcoffset(ambiguous, is_dst=True))
'-1 day, 21:30:00'
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> str(tz.dst(normal))
'1:00:00'
>>> str(tz.dst(normal, is_dst=False))
'1:00:00'
>>> str(tz.dst(normal, is_dst=True))
'1:00:00'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.dst(ambiguous, is_dst=False))
'0:00:00'
>>> str(tz.dst(ambiguous, is_dst=True))
'1:00:00'
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
def __reduce__(self):
# Special pickle to zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory used when unpickling pytz tzinfo instances.

    Shared by both StaticTzInfo and DstTzInfo pickles: a timezone
    database update may move a zone between those base classes, and old
    pickles must keep working across pytz upgrades.
    """
    # A zone that no longer exists would raise KeyError here - that
    # should never happen and would be a bug.
    tz = pytz.timezone(zone)
    if utcoffset is None:
        # StaticTzInfo pickle - the zone object itself is the answer.
        return tz
    # DstTzInfo pickle: select the localized tzinfo instance that
    # restores the pickled (utcoffset, dst, tzname) state exactly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    key = (utcoffset, dstoffset, tzname)
    try:
        return tz._tzinfos[key]
    except KeyError:
        # The exact state is gone: either the pickle is corrupt, or the
        # timezone database was corrected hard enough to remove this
        # (utcoffset, dstoffset) pair or rename the abbreviation.
        pass
    # Abbreviations get corrected by the database maintainers as reality
    # is discovered - accept a match differing only by tzname.
    for localized in tz._tzinfos.values():
        if (localized._utcoffset == utcoffset and
                localized._dst == dstoffset):
            return localized
    # The state has been removed from the zone entirely.  Re-create it
    # so datetimes pickled with the old (possibly incorrect) data keep
    # behaving exactly as before - an overly paranoid safety net that
    # will likely never trigger in real life.
    tz._tzinfos[key] = tz.__class__(key, tz._tzinfos)
    return tz._tzinfos[key]
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/__init__.py
|
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
import sys
import datetime
import os.path
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet # noqa
from pytz.tzinfo import unpickler, BaseTzInfo
from pytz.tzfile import build_tzinfo
# The IANA (nee Olson) database is updated several times a year.
OLSON_VERSION = '2020a'
VERSION = '2020.1' # pip compatible version number.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
# Names re-exported by ``from pytz import *``.
__all__ = [
    'timezone', 'utc', 'country_timezones', 'country_names',
    'AmbiguousTimeError', 'InvalidTimeError',
    'NonExistentTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    'BaseTzInfo', 'FixedOffset',
]
# Python 2/3 compatibility shim.  ``ascii(s)`` returns *s* as a native
# string, raising UnicodeEncodeError if it contains non-ASCII characters
# (all valid timezone names are ASCII).  NOTE: this deliberately shadows
# the ``ascii`` builtin within this module.
if sys.version_info[0] > 2: # Python 3.x
    # Python 3.x doesn't have unicode(), making writing code
    # for Python 2.3 and Python 3.x a pain.
    unicode = str
    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        if type(s) == bytes:
            s = s.decode('ASCII')
        else:
            s.encode('ASCII') # Raise an exception if not ASCII
        return s # But the string - not a byte string.
else: # Python 2.x
    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii(u'Hello')
        'Hello'
        >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        return s.encode('ASCII')
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    The zoneinfo directory defaults to the one shipped next to this
    module, but can be relocated with the PYTZ_TZDATADIR environment
    variable.  Falls back to pkg_resources when no plain file exists at
    the computed location.  Raises ValueError for path segments that
    could escape the zoneinfo tree.
    """
    parts = name.lstrip('/').split('/')
    # Refuse anything that could traverse out of the zoneinfo directory.
    for segment in parts:
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    override_dir = os.environ.get('PYTZ_TZDATADIR', None)
    if override_dir is not None:
        filename = os.path.join(override_dir, *parts)
    else:
        filename = os.path.join(
            os.path.dirname(__file__), 'zoneinfo', *parts)
    if not os.path.exists(filename):
        # http://bugs.launchpad.net/bugs/383171 - only fall back to
        # pkg_resources when absolutely necessary, since broken installs
        # of it exist in the wild.
        try:
            from pkg_resources import resource_stream
        except ImportError:
            resource_stream = None
        if resource_stream is not None:
            return resource_stream(__name__, 'zoneinfo/' + name)
    return open(filename, 'rb')
def resource_exists(name):
    """Return True if the named zoneinfo resource can be opened."""
    try:
        open_resource(name).close()
    except IOError:
        return False
    return True
# Cache of canonical zone name -> tzinfo instance, filled by timezone().
_tzinfo_cache = {}
def timezone(zone):
    r"""Return a datetime.tzinfo implementation for the named timezone.

    Lookups are case-insensitive and cached, so repeated calls for the
    same zone return the identical instance.  'UTC' (in any case) maps
    to the module-level ``utc`` singleton.

    Raises UnknownTimeZoneError when *zone* is None, contains non-ASCII
    characters, or is not present in the zone database.
    """
    if zone is None:
        raise UnknownTimeZoneError(None)
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    # Undo legacy name munging, then canonicalize the casing.
    zone = _case_insensitive_zone_lookup(_unmunge_zone(zone))
    if zone not in _tzinfo_cache:
        if zone not in all_timezones_set:  # noqa
            raise UnknownTimeZoneError(zone)
        fp = open_resource(zone)
        try:
            _tzinfo_cache[zone] = build_tzinfo(zone, fp)
        finally:
            fp.close()
    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
# Lazily built mapping of lowercased zone name -> canonical zone name.
_all_timezones_lower_to_standard = None


def _case_insensitive_zone_lookup(zone):
    """Map *zone* case-insensitively onto a known timezone name.

    Returns the canonically-cased name when a match exists, otherwise
    *zone* unchanged.  The lookup table is built on first use.
    """
    global _all_timezones_lower_to_standard
    if _all_timezones_lower_to_standard is None:
        _all_timezones_lower_to_standard = {
            tz.lower(): tz for tz in all_timezones}  # noqa
    return _all_timezones_lower_to_standard.get(zone.lower()) or zone  # noqa
# Frequently used fixed offsets.
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(BaseTzInfo):
    """UTC
    Optimized UTC implementation. It unpickles using the single module global
    instance defined beneath this class declaration.
    """
    zone = "UTC"
    # UTC never observes DST: both the total and DST offsets are zero.
    _utcoffset = ZERO
    _dst = ZERO
    _tzname = zone
    def fromutc(self, dt):
        # A naive dt is assumed to already be UTC; otherwise defer to the
        # base class (note: ``utc`` is the module-level singleton bound
        # right after this class body).
        if dt.tzinfo is None:
            return self.localize(dt)
        return super(utc.__class__, self).fromutc(dt)
    def utcoffset(self, dt):
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        return ZERO
    def __reduce__(self):
        # Unpickle through the _UTC factory so the singleton is preserved.
        return _UTC, ()
    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)
    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)
    def __repr__(self):
        return "<UTC>"
    def __str__(self):
        return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Guarantees that unpickling a utc instance always yields the module
    global singleton, so identity checks such as ``tz is pytz.utc`` keep
    working across pickle round-trips.
    """
    return utc


_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Thin wrapper around tzinfo.unpickler whose only purpose is to keep
    the dotted path stored inside pickles short.
    """
    return unpickler(*args)


_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.
    iso3166_code is the two letter code used to identify the country.
    >>> def print_list(list_of_strings):
    ...     'We use a helper so doctests work under Python 2.3 -> 3.x'
    ...     for s in list_of_strings:
    ...         print(s)
    >>> print_list(country_timezones['nz'])
    Pacific/Auckland
    Pacific/Chatham
    >>> print_list(country_timezones['ch'])
    Europe/Zurich
    >>> print_list(country_timezones['CH'])
    Europe/Zurich
    >>> print_list(country_timezones[unicode('ch')])
    Europe/Zurich
    >>> print_list(country_timezones['XXX'])
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'
    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::
    >>> print_list(country_timezones('nz'))
    Pacific/Auckland
    Pacific/Chatham
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]
    def _fill(self):
        # Parse zone.tab: tab-separated country code, coordinates, zone
        # name, and an optional trailing comment column.
        data = {}
        zone_tab = open_resource('zone.tab')
        try:
            for line in zone_tab:
                line = line.decode('UTF-8')
                if line.startswith('#'):
                    continue
                # split(None, 4)[:3] tolerates the optional comment field.
                code, coordinates, zone = line.split(None, 4)[:3]
                # Skip zones whose data files did not ship with this build.
                if zone not in all_timezones_set: # noqa
                    continue
                try:
                    data[code].append(zone)
                except KeyError:
                    data[code] = [zone]
            self.data = data
        finally:
            zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.
    >>> print(country_names['au'])
    Australia
    '''
    def _fill(self):
        # Parse iso3166.tab: two-letter code, whitespace, country name.
        data = {}
        zone_tab = open_resource('iso3166.tab')
        try:
            for line in zone_tab.readlines():
                line = line.decode('UTF-8')
                if line.startswith('#'):
                    continue
                code, name = line.split(None, 1)
                data[code] = name.strip()
            self.data = data
        finally:
            zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def FixedOffset(offset, _tzinfos={}):
    """Return a fixed-offset timezone for the given number of minutes.

    *offset* must lie strictly between -1440 and 1440, i.e. less than
    one day in magnitude; ValueError is raised otherwise.  An offset of
    0 is special-cased to return the UTC singleton.

    Instances are memoized (including across pickle round-trips), so
    equal offsets always yield the identical object.
    """
    if offset == 0:
        return UTC
    # NOTE: the mutable default argument is deliberate - it is the
    # process-wide memo table.  setdefault makes creation race-free so
    # only one instance per offset can ever be published.
    instance = _tzinfos.get(offset)
    if instance is None:
        instance = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return instance


FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run this module's doctests."""
    import doctest
    sys.path.insert(0, os.pardir)  # make the package importable as 'pytz'
    import pytz
    return doctest.testmod(pytz)
if __name__ == '__main__':
    _test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Nuuk',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qostanay',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
# Keep only the zones whose data files actually shipped with this
# distribution; LazyList/LazySet defer the filesystem checks until the
# collections are first used.
all_timezones = LazyList(
    tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fort_Nelson',
'America/Fortaleza',
'America/Glace_Bay',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Nuuk',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qostanay',
'Asia/Qyzylorda',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
# Re-bind the curated name list as a LazyList restricted to zones that are
# actually present in all_timezones on this installation; built lazily so
# nothing is evaluated until first use.
common_timezones = LazyList(
    tz for tz in common_timezones if tz in all_timezones)
# Companion LazySet for O(1) membership tests.
common_timezones_set = LazySet(common_timezones)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/reference.py
|
'''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''
from datetime import tzinfo, timedelta, datetime
from pytz import HOUR, ZERO, UTC
__all__ = [
'FixedOffset',
'LocalTimezone',
'USTimeZone',
'Eastern',
'Central',
'Mountain',
'Pacific',
'UTC'
]
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
    """A tzinfo with a constant offset east of UTC and no DST.

    Note that ``FixedOffset(0, "UTC")`` is simply another way to build
    a UTC tzinfo object.
    """
    def __init__(self, offset, name):
        # *offset* is expressed in minutes east of UTC.
        self.__offset = timedelta(minutes=offset)
        self.__name = name
    def utcoffset(self, dt):
        # Constant by construction; *dt* is irrelevant.
        return self.__offset
    def tzname(self, dt):
        return self.__name
    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return ZERO
import time as _time
# Platform's standard-time UTC offset.  time.timezone is seconds *west*
# of UTC, hence the sign flip.
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
    # The platform defines a DST zone; time.altzone is its westward offset.
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    # No DST on this platform: DST offset equals the standard offset.
    DSTOFFSET = STDOFFSET
# Size of the clock shift while DST is in effect (usually one hour).
DSTDIFF = DSTOFFSET - STDOFFSET
# A class capturing the platform's idea of local time.
class LocalTimezone(tzinfo):
    """A tzinfo capturing the platform's notion of local time.

    Offsets come from the module-level STDOFFSET/DSTOFFSET/DSTDIFF
    constants; whether DST applies is decided by the C library via a
    mktime/localtime round trip.
    """
    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET
    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO
    def tzname(self, dt):
        # time.tzname is a (standard, dst) pair; index it with the flag.
        return _time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # Round-trip the naive wallclock time through mktime/localtime so
        # the platform's C library decides whether DST was in effect.
        # (timetuple() carries tm_isdst == -1, i.e. "unknown", and POSIX
        # mktime ignores tm_wday/tm_yday, matching a hand-built tuple.)
        stamp = _time.mktime(dt.timetuple())
        return _time.localtime(stamp).tm_isdst > 0
Local = LocalTimezone()
def first_sunday_on_or_after(dt):
    """Return the first Sunday falling on or after *dt*.

    The time of day is preserved; if *dt* is already a Sunday it is
    returned unchanged.
    """
    # weekday() is 0 for Monday .. 6 for Sunday, so 6 - weekday() is the
    # non-negative number of days to advance (0 when already Sunday).
    return dt + timedelta(days=6 - dt.weekday())
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
# The year 1 here is a placeholder: USTimeZone.dst() substitutes the real
# year via .replace(year=...) before use.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
# A complete implementation of current DST rules for major US time zones.
class USTimeZone(tzinfo):
    """A US timezone implementing the April-to-October DST rules.

    Correct only for the rules in force 1987-2006 (see module docstring);
    intended for testing, not production use.
    """
    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname
    def __repr__(self):
        return self.reprname
    def tzname(self, dt):
        return self.dstname if self.dst(dt) else self.stdname
    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)
    def dst(self, dt):
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them. The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self
        # Find the first Sunday in April and the last in October for
        # dt's year (DSTSTART/DSTEND carry placeholder years).
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
        # Can't compare naive to aware objects, so strip the timezone
        # from dt before the range test.
        return HOUR if start <= dt.replace(tzinfo=None) < end else ZERO
# Concrete instances for the four contiguous-US zones (UTC-5 .. UTC-8).
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/exceptions.py
|
'''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class Error(Exception):
    '''Base class for all exceptions raised by the pytz library'''
    # Intentionally empty: exists only so callers can catch every pytz
    # error with a single "except pytz.exceptions.Error".
class UnknownTimeZoneError(KeyError, Error):
    '''Exception raised when pytz is passed an unknown timezone.
    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True
    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.
    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    And also a subclass of pytz.exceptions.Error, as are other pytz
    exceptions.
    >>> isinstance(UnknownTimeZoneError(), Error)
    True
    '''
    # No behaviour of its own; the dual inheritance *is* the contract.
    pass
class InvalidTimeError(Error):
    '''Base class for invalid time exceptions.'''
    # Parent of AmbiguousTimeError and NonExistentTimeError below.
class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.
    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.
    See DstTzInfo.normalize() for more info
    '''
    # Marker class only; carries no extra state.
class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.
    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
    # Marker class only; carries no extra state.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/lazy.py
|
from threading import RLock
try:
    from collections.abc import Mapping as DictMixin
except ImportError:  # Python < 3.3
    try:
        from UserDict import DictMixin  # Python 2
    except ImportError:  # Python 3.0-3.3
        from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
    """Dictionary populated on first use.

    Subclasses supply a ``_fill()`` method that assigns the real mapping
    to ``self.data``.  Every read-path entry point triggers the fill at
    most once, guarded by the module-level lock.
    """
    # Sentinel: None means "not filled yet".
    data = None
    def _ensure_filled(self):
        """Populate ``self.data`` on first use (thread-safe, idempotent).

        Double-checked locking: the cheap unlocked test avoids taking the
        lock on the hot path; the re-check under the lock handles races.
        """
        if self.data is None:
            _fill_lock.acquire()
            try:
                if self.data is None:
                    self._fill()
            finally:
                _fill_lock.release()
    def __getitem__(self, key):
        self._ensure_filled()
        # Lookup uppercases the key (keys are stored uppercased, e.g.
        # ISO country codes, so lookups are case-insensitive).
        # NOTE(review): __contains__ below does NOT uppercase, so
        # "'us' in d" and "d['us']" disagree for lowercase keys;
        # preserved as-is for backwards compatibility -- confirm intent.
        return self.data[key.upper()]
    def __contains__(self, key):
        self._ensure_filled()
        return key in self.data
    def __iter__(self):
        self._ensure_filled()
        return iter(self.data)
    def __len__(self):
        self._ensure_filled()
        return len(self.data)
    def keys(self):
        self._ensure_filled()
        return self.data.keys()
class LazyList(list):
    """List populated on first use."""
    # Every list entry point that must trigger the lazy fill before
    # delegating to the real list implementation.
    _props = [
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
        'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
        '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__reversed__', '__getslice__', '__setslice__', '__delslice__']
    def __new__(cls, fill_iter=None):
        # NOTE(review): with no iterable this returns a plain list, not a
        # LazyList instance -- callers receive an ordinary empty list.
        if fill_iter is None:
            return list()
        # We need a new class as we will be dynamically messing with its
        # methods.
        class LazyList(list):
            pass
        # Wrap the iterable in a one-element list so the closures below can
        # pop() it, which doubles as the "already filled" flag.
        fill_iter = [fill_iter]
        def lazy(name):
            # Build a wrapper for list method *name* that fills on first use.
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        # First touch: materialise the iterable into self,
                        # then strip all the lazy wrappers so subsequent
                        # calls go straight to the builtin list methods.
                        list.extend(self, fill_iter.pop())
                        for method_name in cls._props:
                            delattr(LazyList, method_name)
                finally:
                    _fill_lock.release()
                return getattr(list, name)(self, *args, **kw)
            return _lazy
        # Install the lazy wrappers on the throwaway subclass.
        for name in cls._props:
            setattr(LazyList, name, lazy(name))
        new_list = LazyList()
        return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
class LazySet(set):
    """Set populated on first use."""
    # Every set entry point that must trigger the lazy fill before
    # delegating to the real set implementation.
    _props = (
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__sub__', '__and__', '__xor__', '__or__',
        '__rsub__', '__rand__', '__rxor__', '__ror__',
        '__isub__', '__iand__', '__ixor__', '__ior__',
        'add', 'clear', 'copy', 'difference', 'difference_update',
        'discard', 'intersection', 'intersection_update', 'isdisjoint',
        'issubset', 'issuperset', 'pop', 'remove',
        'symmetric_difference', 'symmetric_difference_update',
        'union', 'update')
    def __new__(cls, fill_iter=None):
        # NOTE(review): with no iterable this returns a plain set, not a
        # LazySet instance -- callers receive an ordinary empty set.
        if fill_iter is None:
            return set()
        # Throwaway subclass whose methods are patched, then stripped.
        class LazySet(set):
            pass
        # One-element list doubles as the "not yet filled" flag.
        fill_iter = [fill_iter]
        def lazy(name):
            # Build a wrapper for set method *name* that fills on first use.
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        # First touch: add every element, then strip the
                        # wrappers so later calls hit the builtins directly.
                        for i in fill_iter.pop():
                            set.add(self, i)
                        for method_name in cls._props:
                            delattr(LazySet, method_name)
                finally:
                    _fill_lock.release()
                return getattr(set, name)(self, *args, **kw)
            return _lazy
        for name in cls._props:
            setattr(LazySet, name, lazy(name))
        new_set = LazySet()
        return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/leapseconds
|
# Allowance for leap seconds added to each time zone file.
# This file is in the public domain.
# This file is generated automatically from the data in the public-domain
# NIST format leap-seconds.list file, which can be copied from
# <ftp://ftp.nist.gov/pub/time/leap-seconds.list>
# or <ftp://ftp.boulder.nist.gov/pub/time/leap-seconds.list>.
# For more about leap-seconds.list, please see
# The NTP Timescale and Leap Seconds
# <https://www.eecis.udel.edu/~mills/leap.html>.
# The rules for leap seconds are specified in Annex 1 (Time scales) of:
# Standard-frequency and time-signal emissions.
# International Telecommunication Union - Radiocommunication Sector
# (ITU-R) Recommendation TF.460-6 (02/2002)
# <https://www.itu.int/rec/R-REC-TF.460-6-200202-I/>.
# The International Earth Rotation and Reference Systems Service (IERS)
# periodically uses leap seconds to keep UTC to within 0.9 s of UT1
# (a proxy for Earth's angle in space as measured by astronomers)
# and publishes leap second data in a copyrighted file
# <https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat>.
# See: Levine J. Coordinated Universal Time and the leap second.
# URSI Radio Sci Bull. 2016;89(4):30-6. doi:10.23919/URSIRSB.2016.7909995
# <https://ieeexplore.ieee.org/document/7909995>.
# There were no leap seconds before 1972, as no official mechanism
# accounted for the discrepancy between atomic time (TAI) and the earth's
# rotation. The first ("1 Jan 1972") data line in leap-seconds.list
# does not denote a leap second; it denotes the start of the current definition
# of UTC.
# All leap-seconds are Stationary (S) at the given UTC time.
# The correction (+ or -) is made at the given time, so in the unlikely
# event of a negative leap second, a line would look like this:
# Leap YEAR MON DAY 23:59:59 - S
# Typical lines look like this:
# Leap YEAR MON DAY 23:59:60 + S
Leap 1972 Jun 30 23:59:60 + S
Leap 1972 Dec 31 23:59:60 + S
Leap 1973 Dec 31 23:59:60 + S
Leap 1974 Dec 31 23:59:60 + S
Leap 1975 Dec 31 23:59:60 + S
Leap 1976 Dec 31 23:59:60 + S
Leap 1977 Dec 31 23:59:60 + S
Leap 1978 Dec 31 23:59:60 + S
Leap 1979 Dec 31 23:59:60 + S
Leap 1981 Jun 30 23:59:60 + S
Leap 1982 Jun 30 23:59:60 + S
Leap 1983 Jun 30 23:59:60 + S
Leap 1985 Jun 30 23:59:60 + S
Leap 1987 Dec 31 23:59:60 + S
Leap 1989 Dec 31 23:59:60 + S
Leap 1990 Dec 31 23:59:60 + S
Leap 1992 Jun 30 23:59:60 + S
Leap 1993 Jun 30 23:59:60 + S
Leap 1994 Jun 30 23:59:60 + S
Leap 1995 Dec 31 23:59:60 + S
Leap 1997 Jun 30 23:59:60 + S
Leap 1998 Dec 31 23:59:60 + S
Leap 2005 Dec 31 23:59:60 + S
Leap 2008 Dec 31 23:59:60 + S
Leap 2012 Jun 30 23:59:60 + S
Leap 2015 Jun 30 23:59:60 + S
Leap 2016 Dec 31 23:59:60 + S
# UTC timestamp when this leap second list expires.
# Any additional leap seconds will come after this.
# This Expires line is commented out for now,
# so that pre-2020a zic implementations do not reject this file.
#Expires 2020 Dec 28 00:00:00
# POSIX timestamps for the data in this file:
#updated 1467936000 (2016-07-08 00:00:00 UTC)
#expires 1609113600 (2020-12-28 00:00:00 UTC)
# Updated through IERS Bulletin C59
# File expires on: 28 December 2020
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/zone1970.tab
|
# tzdb timezone descriptions
#
# This file is in the public domain.
#
# From Paul Eggert (2018-06-27):
# This file contains a table where each row stands for a timezone where
# civil timestamps have agreed since 1970. Columns are separated by
# a single tab. Lines beginning with '#' are comments. All text uses
# UTF-8 encoding. The columns of the table are as follows:
#
# 1. The countries that overlap the timezone, as a comma-separated list
# of ISO 3166 2-character country codes. See the file 'iso3166.tab'.
# 2. Latitude and longitude of the timezone's principal location
# in ISO 6709 sign-degrees-minutes-seconds format,
# either ±DDMM±DDDMM or ±DDMMSS±DDDMMSS,
# first latitude (+ is north), then longitude (+ is east).
# 3. Timezone name used in value of TZ environment variable.
# Please see the theory.html file for how these names are chosen.
# If multiple timezones overlap a country, each has a row in the
# table, with each column 1 containing the country code.
# 4. Comments; present if and only if a country has multiple timezones.
#
# If a timezone covers multiple countries, the most-populous city is used,
# and that country is listed first in column 1; any other countries
# are listed alphabetically by country code. The table is sorted
# first by country code, then (if possible) by an order within the
# country that (1) makes some geographical sense, and (2) puts the
# most populous timezones first, where that does not contradict (1).
#
# This table is intended as an aid for users, to help them select timezones
# appropriate for their practical needs. It is not intended to take or
# endorse any position on legal or territorial claims.
#
#country-
#codes coordinates TZ comments
AD +4230+00131 Europe/Andorra
AE,OM +2518+05518 Asia/Dubai
AF +3431+06912 Asia/Kabul
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
AQ -6617+11031 Antarctica/Casey Casey
AQ -6835+07758 Antarctica/Davis Davis
AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville
AQ -6736+06253 Antarctica/Mawson Mawson
AQ -6448-06406 Antarctica/Palmer Palmer
AQ -6734-06808 Antarctica/Rothera Rothera
AQ -690022+0393524 Antarctica/Syowa Syowa
AQ -720041+0023206 Antarctica/Troll Troll
AQ -7824+10654 Antarctica/Vostok Vostok
AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF)
AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)
AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN)
AR -2411-06518 America/Argentina/Jujuy Jujuy (JY)
AR -2649-06513 America/Argentina/Tucuman Tucumán (TM)
AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH)
AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR)
AR -3132-06831 America/Argentina/San_Juan San Juan (SJ)
AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ)
AR -3319-06621 America/Argentina/San_Luis San Luis (SL)
AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC)
AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF)
AS,UM -1416-17042 Pacific/Pago_Pago Samoa, Midway
AT +4813+01620 Europe/Vienna
AU -3133+15905 Australia/Lord_Howe Lord Howe Island
AU -5430+15857 Antarctica/Macquarie Macquarie Island
AU -4253+14719 Australia/Hobart Tasmania (most areas)
AU -3956+14352 Australia/Currie Tasmania (King Island)
AU -3749+14458 Australia/Melbourne Victoria
AU -3352+15113 Australia/Sydney New South Wales (most areas)
AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna)
AU -2728+15302 Australia/Brisbane Queensland (most areas)
AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands)
AU -3455+13835 Australia/Adelaide South Australia
AU -1228+13050 Australia/Darwin Northern Territory
AU -3157+11551 Australia/Perth Western Australia (most areas)
AU -3143+12852 Australia/Eucla Western Australia (Eucla)
AZ +4023+04951 Asia/Baku
BB +1306-05937 America/Barbados
BD +2343+09025 Asia/Dhaka
BE +5050+00420 Europe/Brussels
BG +4241+02319 Europe/Sofia
BM +3217-06446 Atlantic/Bermuda
BN +0456+11455 Asia/Brunei
BO -1630-06809 America/La_Paz
BR -0351-03225 America/Noronha Atlantic islands
BR -0127-04829 America/Belem Pará (east); Amapá
BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB)
BR -0803-03454 America/Recife Pernambuco
BR -0712-04812 America/Araguaina Tocantins
BR -0940-03543 America/Maceio Alagoas, Sergipe
BR -1259-03831 America/Bahia Bahia
BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS)
BR -2027-05437 America/Campo_Grande Mato Grosso do Sul
BR -1535-05605 America/Cuiaba Mato Grosso
BR -0226-05452 America/Santarem Pará (west)
BR -0846-06354 America/Porto_Velho Rondônia
BR +0249-06040 America/Boa_Vista Roraima
BR -0308-06001 America/Manaus Amazonas (east)
BR -0640-06952 America/Eirunepe Amazonas (west)
BR -0958-06748 America/Rio_Branco Acre
BS +2505-07721 America/Nassau
BT +2728+08939 Asia/Thimphu
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast)
CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE
CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton)
CA +4606-06447 America/Moncton Atlantic - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas)
CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore)
CA +4339-07923 America/Toronto Eastern - ON, QC (most areas)
CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73)
CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay)
CA +6344-06828 America/Iqaluit Eastern - NU (most east areas)
CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung)
CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H)
CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba
CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances)
CA +744144-0944945 America/Resolute Central - NU (Resolute)
CA +624900-0920459 America/Rankin_Inlet Central - NU (central)
CA +5024-10439 America/Regina CST - SK (most areas)
CA +5017-10750 America/Swift_Current CST - SK (midwest)
CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W)
CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west)
CA +6227-11421 America/Yellowknife Mountain - NT (central)
CA +682059-1334300 America/Inuvik Mountain - NT (west)
CA +4906-11631 America/Creston MST - BC (Creston)
CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John)
CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson)
CA +4916-12307 America/Vancouver Pacific - BC (most areas)
CA +6043-13503 America/Whitehorse Pacific - Yukon (east)
CA +6404-13925 America/Dawson Pacific - Yukon (west)
CC -1210+09655 Indian/Cocos
CH,DE,LI +4723+00832 Europe/Zurich Swiss time
CI,BF,GM,GN,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan
CK -2114-15946 Pacific/Rarotonga
CL -3327-07040 America/Santiago Chile (most areas)
CL -5309-07055 America/Punta_Arenas Region of Magallanes
CL -2709-10926 Pacific/Easter Easter Island
CN +3114+12128 Asia/Shanghai Beijing Time
CN +4348+08735 Asia/Urumqi Xinjiang Time
CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
CV +1455-02331 Atlantic/Cape_Verde
CW,AW,BQ,SX +1211-06900 America/Curacao
CX -1025+10543 Indian/Christmas
CY +3510+03322 Asia/Nicosia Cyprus (most areas)
CY +3507+03357 Asia/Famagusta Northern Cyprus
CZ,SK +5005+01426 Europe/Prague
DE +5230+01322 Europe/Berlin Germany (most areas)
DK +5540+01235 Europe/Copenhagen
DO +1828-06954 America/Santo_Domingo
DZ +3647+00303 Africa/Algiers
EC -0210-07950 America/Guayaquil Ecuador (mainland)
EC -0054-08936 Pacific/Galapagos Galápagos Islands
EE +5925+02445 Europe/Tallinn
EG +3003+03115 Africa/Cairo
EH +2709-01312 Africa/El_Aaiun
ES +4024-00341 Europe/Madrid Spain (mainland)
ES +3553-00519 Africa/Ceuta Ceuta, Melilla
ES +2806-01524 Atlantic/Canary Canary Islands
FI,AX +6010+02458 Europe/Helsinki
FJ -1808+17825 Pacific/Fiji
FK -5142-05751 Atlantic/Stanley
FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap
FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape
FM +0519+16259 Pacific/Kosrae Kosrae
FO +6201-00646 Atlantic/Faroe
FR +4852+00220 Europe/Paris
GB,GG,IM,JE +513030-0000731 Europe/London
GE +4143+04449 Asia/Tbilisi
GF +0456-05220 America/Cayenne
GH +0533-00013 Africa/Accra
GI +3608-00521 Europe/Gibraltar
GL +6411-05144 America/Nuuk Greenland (most areas)
GL +7646-01840 America/Danmarkshavn National Park (east coast)
GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit
GL +7634-06847 America/Thule Thule/Pituffik
GR +3758+02343 Europe/Athens
GS -5416-03632 Atlantic/South_Georgia
GT +1438-09031 America/Guatemala
GU,MP +1328+14445 Pacific/Guam
GW +1151-01535 Africa/Bissau
GY +0648-05810 America/Guyana
HK +2217+11409 Asia/Hong_Kong
HN +1406-08713 America/Tegucigalpa
HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java, Sumatra
ID -0002+10920 Asia/Pontianak Borneo (west, central)
ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)
ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas
IE +5320-00615 Europe/Dublin
IL +314650+0351326 Asia/Jerusalem
IN +2232+08822 Asia/Kolkata
IO -0720+07225 Indian/Chagos
IQ +3321+04425 Asia/Baghdad
IR +3540+05126 Asia/Tehran
IS +6409-02151 Atlantic/Reykjavik
IT,SM,VA +4154+01229 Europe/Rome
JM +175805-0764736 America/Jamaica
JO +3157+03556 Asia/Amman
JP +353916+1394441 Asia/Tokyo
KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
KI +0125+17300 Pacific/Tarawa Gilbert Islands
KI -0308-17105 Pacific/Enderbury Phoenix Islands
KI +0152-15720 Pacific/Kiritimati Line Islands
KP +3901+12545 Asia/Pyongyang
KR +3733+12658 Asia/Seoul
KZ +4315+07657 Asia/Almaty Kazakhstan (most areas)
KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda
KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay
KZ +5017+05710 Asia/Aqtobe Aqtöbe/Aktobe
KZ +4431+05016 Asia/Aqtau Mangghystaū/Mankistau
KZ +4707+05156 Asia/Atyrau Atyraū/Atirau/Gur'yev
KZ +5113+05121 Asia/Oral West Kazakhstan
LB +3353+03530 Asia/Beirut
LK +0656+07951 Asia/Colombo
LR +0618-01047 Africa/Monrovia
LT +5441+02519 Europe/Vilnius
LU +4936+00609 Europe/Luxembourg
LV +5657+02406 Europe/Riga
LY +3254+01311 Africa/Tripoli
MA +3339-00735 Africa/Casablanca
MC +4342+00723 Europe/Monaco
MD +4700+02850 Europe/Chisinau
MH +0709+17112 Pacific/Majuro Marshall Islands (most areas)
MH +0905+16720 Pacific/Kwajalein Kwajalein
MM +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas)
MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan
MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar
MO +221150+1133230 Asia/Macau
MQ +1436-06105 America/Martinique
MT +3554+01431 Europe/Malta
MU -2010+05730 Indian/Mauritius
MV +0410+07330 Indian/Maldives
MX +1924-09909 America/Mexico_City Central Time
MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo
MX +2058-08937 America/Merida Central Time - Campeche, Yucatán
MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo León, Tamaulipas (most areas)
MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo León, Tamaulipas (US border)
MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa
MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas)
MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border)
MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora
MX +3232-11701 America/Tijuana Pacific Time US - Baja California
MX +2048-10515 America/Bahia_Banderas Central Time - Bahía de Banderas
MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula)
MY +0133+11020 Asia/Kuching Sabah, Sarawak
MZ,BI,BW,CD,MW,RW,ZM,ZW -2558+03235 Africa/Maputo Central Africa Time
NA -2234+01706 Africa/Windhoek
NC -2216+16627 Pacific/Noumea
NF -2903+16758 Pacific/Norfolk
NG,AO,BJ,CD,CF,CG,CM,GA,GQ,NE +0627+00324 Africa/Lagos West Africa Time
NI +1209-08617 America/Managua
NL +5222+00454 Europe/Amsterdam
NO,SJ +5955+01045 Europe/Oslo
NP +2743+08519 Asia/Kathmandu
NR -0031+16655 Pacific/Nauru
NU -1901-16955 Pacific/Niue
NZ,AQ -3652+17446 Pacific/Auckland New Zealand time
NZ -4357-17633 Pacific/Chatham Chatham Islands
PA,KY +0858-07932 America/Panama
PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas)
PG -0613+15534 Pacific/Bougainville Bougainville
PH +1435+12100 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
PN -2504-13005 Pacific/Pitcairn
PR +182806-0660622 America/Puerto_Rico
PS +3130+03428 Asia/Gaza Gaza Strip
PS +313200+0350542 Asia/Hebron West Bank
PT +3843-00908 Europe/Lisbon Portugal (mainland)
PT +3238-01654 Atlantic/Madeira Madeira Islands
PT +3744-02540 Atlantic/Azores Azores
PW +0720+13429 Pacific/Palau
PY -2516-05740 America/Asuncion
QA,BH +2517+05132 Asia/Qatar
RE,TF -2052+05528 Indian/Reunion Réunion, Crozet, Scattered Islands
RO +4426+02606 Europe/Bucharest
RS,BA,HR,ME,MK,SI +4450+02030 Europe/Belgrade
RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad
RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area
# Mention RU and UA alphabetically. See "territorial claims" above.
RU,UA +4457+03406 Europe/Simferopol Crimea
RU +5836+04939 Europe/Kirov MSK+00 - Kirov
RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan
RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd
RU +5134+04602 Europe/Saratov MSK+01 - Saratov
RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk
RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia
RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals
RU +5500+07324 Asia/Omsk MSK+03 - Omsk
RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk
RU +5322+08345 Asia/Barnaul MSK+04 - Altai
RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk
RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo
RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area
RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia
RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky
RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River
RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky
RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River
RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky
RU +5934+15048 Asia/Magadan MSK+08 - Magadan
RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island
RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is
RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka
RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea
SA,KW,YE +2438+04643 Asia/Riyadh
SB -0932+16012 Pacific/Guadalcanal
SC -0440+05528 Indian/Mahe
SD +1536+03232 Africa/Khartoum
SE +5920+01803 Europe/Stockholm
SG +0117+10351 Asia/Singapore
SR +0550-05510 America/Paramaribo
SS +0451+03137 Africa/Juba
ST +0020+00644 Africa/Sao_Tome
SV +1342-08912 America/El_Salvador
SY +3330+03618 Asia/Damascus
TC +2128-07108 America/Grand_Turk
TD +1207+01503 Africa/Ndjamena
TF -492110+0701303 Indian/Kerguelen Kerguelen, St Paul Island, Amsterdam Island
TH,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas)
TJ +3835+06848 Asia/Dushanbe
TK -0922-17114 Pacific/Fakaofo
TL -0833+12535 Asia/Dili
TM +3757+05823 Asia/Ashgabat
TN +3648+01011 Africa/Tunis
TO -2110-17510 Pacific/Tongatapu
TR +4101+02858 Europe/Istanbul
TT,AG,AI,BL,DM,GD,GP,KN,LC,MF,MS,VC,VG,VI +1039-06131 America/Port_of_Spain
TV -0831+17913 Pacific/Funafuti
TW +2503+12130 Asia/Taipei
UA +5026+03031 Europe/Kiev Ukraine (most areas)
UA +4837+02218 Europe/Uzhgorod Transcarpathia
UA +4750+03510 Europe/Zaporozhye Zaporozhye and east Lugansk
UM +1917+16637 Pacific/Wake Wake Island
US +404251-0740023 America/New_York Eastern (most areas)
US +421953-0830245 America/Detroit Eastern - MI (most areas)
US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area)
US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne)
US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas)
US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn)
US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski)
US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford)
US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike)
US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland)
US +415100-0873900 America/Chicago Central (most areas)
US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry)
US +411745-0863730 America/Indiana/Knox Central - IN (Starke)
US +450628-0873651 America/Menominee Central - MI (Wisconsin border)
US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver)
US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural)
US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer)
US +394421-1045903 America/Denver Mountain (most areas)
US +433649-1161209 America/Boise Mountain - ID (south); OR (east)
US +332654-1120424 America/Phoenix MST - Arizona (except Navajo)
US +340308-1181434 America/Los_Angeles Pacific
US +611305-1495401 America/Anchorage Alaska (most areas)
US +581807-1342511 America/Juneau Alaska - Juneau area
US +571035-1351807 America/Sitka Alaska - Sitka area
US +550737-1313435 America/Metlakatla Alaska - Annette Island
US +593249-1394338 America/Yakutat Alaska - Yakutat
US +643004-1652423 America/Nome Alaska (west)
US +515248-1763929 America/Adak Aleutian Islands
US,UM +211825-1575130 Pacific/Honolulu Hawaii
UY -345433-0561245 America/Montevideo
UZ +3940+06648 Asia/Samarkand Uzbekistan (west)
UZ +4120+06918 Asia/Tashkent Uzbekistan (east)
VE +1030-06656 America/Caracas
VN +1045+10640 Asia/Ho_Chi_Minh Vietnam (south)
VU -1740+16825 Pacific/Efate
WF -1318-17610 Pacific/Wallis
WS -1350-17144 Pacific/Apia
ZA,LS,SZ -2615+02800 Africa/Johannesburg
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Universal
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/iso3166.tab
|
# ISO 3166 alpha-2 country codes
#
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
#
# From Paul Eggert (2015-05-02):
# This file contains a table of two-letter country codes. Columns are
# separated by a single tab. Lines beginning with '#' are comments.
# All text uses UTF-8 encoding. The columns of the table are as follows:
#
# 1. ISO 3166-1 alpha-2 country code, current as of
# ISO 3166-1 N976 (2018-11-06). See: Updates on ISO 3166-1
# https://isotc.iso.org/livelink/livelink/Open/16944257
# 2. The usual English name for the coded region,
# chosen so that alphabetic sorting of subsets produces helpful lists.
# This is not the same as the English name in the ISO 3166 tables.
#
# The table is sorted by country code.
#
# This table is intended as an aid for users, to help them select time
# zone data appropriate for their practical needs. It is not intended
# to take or endorse any position on legal or territorial claims.
#
#country-
#code name of country, territory, area, or subdivision
AD Andorra
AE United Arab Emirates
AF Afghanistan
AG Antigua & Barbuda
AI Anguilla
AL Albania
AM Armenia
AO Angola
AQ Antarctica
AR Argentina
AS Samoa (American)
AT Austria
AU Australia
AW Aruba
AX Åland Islands
AZ Azerbaijan
BA Bosnia & Herzegovina
BB Barbados
BD Bangladesh
BE Belgium
BF Burkina Faso
BG Bulgaria
BH Bahrain
BI Burundi
BJ Benin
BL St Barthelemy
BM Bermuda
BN Brunei
BO Bolivia
BQ Caribbean NL
BR Brazil
BS Bahamas
BT Bhutan
BV Bouvet Island
BW Botswana
BY Belarus
BZ Belize
CA Canada
CC Cocos (Keeling) Islands
CD Congo (Dem. Rep.)
CF Central African Rep.
CG Congo (Rep.)
CH Switzerland
CI Côte d'Ivoire
CK Cook Islands
CL Chile
CM Cameroon
CN China
CO Colombia
CR Costa Rica
CU Cuba
CV Cape Verde
CW Curaçao
CX Christmas Island
CY Cyprus
CZ Czech Republic
DE Germany
DJ Djibouti
DK Denmark
DM Dominica
DO Dominican Republic
DZ Algeria
EC Ecuador
EE Estonia
EG Egypt
EH Western Sahara
ER Eritrea
ES Spain
ET Ethiopia
FI Finland
FJ Fiji
FK Falkland Islands
FM Micronesia
FO Faroe Islands
FR France
GA Gabon
GB Britain (UK)
GD Grenada
GE Georgia
GF French Guiana
GG Guernsey
GH Ghana
GI Gibraltar
GL Greenland
GM Gambia
GN Guinea
GP Guadeloupe
GQ Equatorial Guinea
GR Greece
GS South Georgia & the South Sandwich Islands
GT Guatemala
GU Guam
GW Guinea-Bissau
GY Guyana
HK Hong Kong
HM Heard Island & McDonald Islands
HN Honduras
HR Croatia
HT Haiti
HU Hungary
ID Indonesia
IE Ireland
IL Israel
IM Isle of Man
IN India
IO British Indian Ocean Territory
IQ Iraq
IR Iran
IS Iceland
IT Italy
JE Jersey
JM Jamaica
JO Jordan
JP Japan
KE Kenya
KG Kyrgyzstan
KH Cambodia
KI Kiribati
KM Comoros
KN St Kitts & Nevis
KP Korea (North)
KR Korea (South)
KW Kuwait
KY Cayman Islands
KZ Kazakhstan
LA Laos
LB Lebanon
LC St Lucia
LI Liechtenstein
LK Sri Lanka
LR Liberia
LS Lesotho
LT Lithuania
LU Luxembourg
LV Latvia
LY Libya
MA Morocco
MC Monaco
MD Moldova
ME Montenegro
MF St Martin (French)
MG Madagascar
MH Marshall Islands
MK North Macedonia
ML Mali
MM Myanmar (Burma)
MN Mongolia
MO Macau
MP Northern Mariana Islands
MQ Martinique
MR Mauritania
MS Montserrat
MT Malta
MU Mauritius
MV Maldives
MW Malawi
MX Mexico
MY Malaysia
MZ Mozambique
NA Namibia
NC New Caledonia
NE Niger
NF Norfolk Island
NG Nigeria
NI Nicaragua
NL Netherlands
NO Norway
NP Nepal
NR Nauru
NU Niue
NZ New Zealand
OM Oman
PA Panama
PE Peru
PF French Polynesia
PG Papua New Guinea
PH Philippines
PK Pakistan
PL Poland
PM St Pierre & Miquelon
PN Pitcairn
PR Puerto Rico
PS Palestine
PT Portugal
PW Palau
PY Paraguay
QA Qatar
RE Réunion
RO Romania
RS Serbia
RU Russia
RW Rwanda
SA Saudi Arabia
SB Solomon Islands
SC Seychelles
SD Sudan
SE Sweden
SG Singapore
SH St Helena
SI Slovenia
SJ Svalbard & Jan Mayen
SK Slovakia
SL Sierra Leone
SM San Marino
SN Senegal
SO Somalia
SR Suriname
SS South Sudan
ST Sao Tome & Principe
SV El Salvador
SX St Maarten (Dutch)
SY Syria
SZ Eswatini (Swaziland)
TC Turks & Caicos Is
TD Chad
TF French Southern & Antarctic Lands
TG Togo
TH Thailand
TJ Tajikistan
TK Tokelau
TL East Timor
TM Turkmenistan
TN Tunisia
TO Tonga
TR Turkey
TT Trinidad & Tobago
TV Tuvalu
TW Taiwan
TZ Tanzania
UA Ukraine
UG Uganda
UM US minor outlying islands
US United States
UY Uruguay
UZ Uzbekistan
VA Vatican City
VC St Vincent
VE Venezuela
VG Virgin Islands (UK)
VI Virgin Islands (US)
VN Vietnam
VU Vanuatu
WF Wallis & Futuna
WS Samoa (western)
YE Yemen
YT Mayotte
ZA South Africa
ZM Zambia
ZW Zimbabwe
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Greenwich
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/GMT-0
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/GMT
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Zulu
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/GMT0
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/zone.tab
|
# tzdb timezone descriptions (deprecated version)
#
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
#
# From Paul Eggert (2018-06-27):
# This file is intended as a backward-compatibility aid for older programs.
# New programs should use zone1970.tab. This file is like zone1970.tab (see
# zone1970.tab's comments), but with the following additional restrictions:
#
# 1. This file contains only ASCII characters.
# 2. The first data column contains exactly one country code.
#
# Because of (2), each row stands for an area that is the intersection
# of a region identified by a country code and of a timezone where civil
# clocks have agreed since 1970; this is a narrower definition than
# that of zone1970.tab.
#
# This table is intended as an aid for users, to help them select timezones
# appropriate for their practical needs. It is not intended to take or
# endorse any position on legal or territorial claims.
#
#country-
#code coordinates TZ comments
AD +4230+00131 Europe/Andorra
AE +2518+05518 Asia/Dubai
AF +3431+06912 Asia/Kabul
AG +1703-06148 America/Antigua
AI +1812-06304 America/Anguilla
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo New Zealand time - McMurdo, South Pole
AQ -6617+11031 Antarctica/Casey Casey
AQ -6835+07758 Antarctica/Davis Davis
AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville
AQ -6736+06253 Antarctica/Mawson Mawson
AQ -6448-06406 Antarctica/Palmer Palmer
AQ -6734-06808 Antarctica/Rothera Rothera
AQ -690022+0393524 Antarctica/Syowa Syowa
AQ -720041+0023206 Antarctica/Troll Troll
AQ -7824+10654 Antarctica/Vostok Vostok
AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF)
AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)
AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN)
AR -2411-06518 America/Argentina/Jujuy Jujuy (JY)
AR -2649-06513 America/Argentina/Tucuman Tucuman (TM)
AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH)
AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR)
AR -3132-06831 America/Argentina/San_Juan San Juan (SJ)
AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ)
AR -3319-06621 America/Argentina/San_Luis San Luis (SL)
AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC)
AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF)
AS -1416-17042 Pacific/Pago_Pago
AT +4813+01620 Europe/Vienna
AU -3133+15905 Australia/Lord_Howe Lord Howe Island
AU -5430+15857 Antarctica/Macquarie Macquarie Island
AU -4253+14719 Australia/Hobart Tasmania (most areas)
AU -3956+14352 Australia/Currie Tasmania (King Island)
AU -3749+14458 Australia/Melbourne Victoria
AU -3352+15113 Australia/Sydney New South Wales (most areas)
AU -3157+14127 Australia/Broken_Hill New South Wales (Yancowinna)
AU -2728+15302 Australia/Brisbane Queensland (most areas)
AU -2016+14900 Australia/Lindeman Queensland (Whitsunday Islands)
AU -3455+13835 Australia/Adelaide South Australia
AU -1228+13050 Australia/Darwin Northern Territory
AU -3157+11551 Australia/Perth Western Australia (most areas)
AU -3143+12852 Australia/Eucla Western Australia (Eucla)
AW +1230-06958 America/Aruba
AX +6006+01957 Europe/Mariehamn
AZ +4023+04951 Asia/Baku
BA +4352+01825 Europe/Sarajevo
BB +1306-05937 America/Barbados
BD +2343+09025 Asia/Dhaka
BE +5050+00420 Europe/Brussels
BF +1222-00131 Africa/Ouagadougou
BG +4241+02319 Europe/Sofia
BH +2623+05035 Asia/Bahrain
BI -0323+02922 Africa/Bujumbura
BJ +0629+00237 Africa/Porto-Novo
BL +1753-06251 America/St_Barthelemy
BM +3217-06446 Atlantic/Bermuda
BN +0456+11455 Asia/Brunei
BO -1630-06809 America/La_Paz
BQ +120903-0681636 America/Kralendijk
BR -0351-03225 America/Noronha Atlantic islands
BR -0127-04829 America/Belem Para (east); Amapa
BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB)
BR -0803-03454 America/Recife Pernambuco
BR -0712-04812 America/Araguaina Tocantins
BR -0940-03543 America/Maceio Alagoas, Sergipe
BR -1259-03831 America/Bahia Bahia
BR -2332-04637 America/Sao_Paulo Brazil (southeast: GO, DF, MG, ES, RJ, SP, PR, SC, RS)
BR -2027-05437 America/Campo_Grande Mato Grosso do Sul
BR -1535-05605 America/Cuiaba Mato Grosso
BR -0226-05452 America/Santarem Para (west)
BR -0846-06354 America/Porto_Velho Rondonia
BR +0249-06040 America/Boa_Vista Roraima
BR -0308-06001 America/Manaus Amazonas (east)
BR -0640-06952 America/Eirunepe Amazonas (west)
BR -0958-06748 America/Rio_Branco Acre
BS +2505-07721 America/Nassau
BT +2728+08939 Asia/Thimphu
BW -2439+02555 Africa/Gaborone
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast)
CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE
CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton)
CA +4606-06447 America/Moncton Atlantic - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas)
CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore)
CA +4339-07923 America/Toronto Eastern - ON, QC (most areas)
CA +4901-08816 America/Nipigon Eastern - ON, QC (no DST 1967-73)
CA +4823-08915 America/Thunder_Bay Eastern - ON (Thunder Bay)
CA +6344-06828 America/Iqaluit Eastern - NU (most east areas)
CA +6608-06544 America/Pangnirtung Eastern - NU (Pangnirtung)
CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H)
CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba
CA +4843-09434 America/Rainy_River Central - ON (Rainy R, Ft Frances)
CA +744144-0944945 America/Resolute Central - NU (Resolute)
CA +624900-0920459 America/Rankin_Inlet Central - NU (central)
CA +5024-10439 America/Regina CST - SK (most areas)
CA +5017-10750 America/Swift_Current CST - SK (midwest)
CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W)
CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west)
CA +6227-11421 America/Yellowknife Mountain - NT (central)
CA +682059-1334300 America/Inuvik Mountain - NT (west)
CA +4906-11631 America/Creston MST - BC (Creston)
CA +5946-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John)
CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson)
CA +4916-12307 America/Vancouver Pacific - BC (most areas)
CA +6043-13503 America/Whitehorse Pacific - Yukon (east)
CA +6404-13925 America/Dawson Pacific - Yukon (west)
CC -1210+09655 Indian/Cocos
CD -0418+01518 Africa/Kinshasa Dem. Rep. of Congo (west)
CD -1140+02728 Africa/Lubumbashi Dem. Rep. of Congo (east)
CF +0422+01835 Africa/Bangui
CG -0416+01517 Africa/Brazzaville
CH +4723+00832 Europe/Zurich
CI +0519-00402 Africa/Abidjan
CK -2114-15946 Pacific/Rarotonga
CL -3327-07040 America/Santiago Chile (most areas)
CL -5309-07055 America/Punta_Arenas Region of Magallanes
CL -2709-10926 Pacific/Easter Easter Island
CM +0403+00942 Africa/Douala
CN +3114+12128 Asia/Shanghai Beijing Time
CN +4348+08735 Asia/Urumqi Xinjiang Time
CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
CV +1455-02331 Atlantic/Cape_Verde
CW +1211-06900 America/Curacao
CX -1025+10543 Indian/Christmas
CY +3510+03322 Asia/Nicosia Cyprus (most areas)
CY +3507+03357 Asia/Famagusta Northern Cyprus
CZ +5005+01426 Europe/Prague
DE +5230+01322 Europe/Berlin Germany (most areas)
DE +4742+00841 Europe/Busingen Busingen
DJ +1136+04309 Africa/Djibouti
DK +5540+01235 Europe/Copenhagen
DM +1518-06124 America/Dominica
DO +1828-06954 America/Santo_Domingo
DZ +3647+00303 Africa/Algiers
EC -0210-07950 America/Guayaquil Ecuador (mainland)
EC -0054-08936 Pacific/Galapagos Galapagos Islands
EE +5925+02445 Europe/Tallinn
EG +3003+03115 Africa/Cairo
EH +2709-01312 Africa/El_Aaiun
ER +1520+03853 Africa/Asmara
ES +4024-00341 Europe/Madrid Spain (mainland)
ES +3553-00519 Africa/Ceuta Ceuta, Melilla
ES +2806-01524 Atlantic/Canary Canary Islands
ET +0902+03842 Africa/Addis_Ababa
FI +6010+02458 Europe/Helsinki
FJ -1808+17825 Pacific/Fiji
FK -5142-05751 Atlantic/Stanley
FM +0725+15147 Pacific/Chuuk Chuuk/Truk, Yap
FM +0658+15813 Pacific/Pohnpei Pohnpei/Ponape
FM +0519+16259 Pacific/Kosrae Kosrae
FO +6201-00646 Atlantic/Faroe
FR +4852+00220 Europe/Paris
GA +0023+00927 Africa/Libreville
GB +513030-0000731 Europe/London
GD +1203-06145 America/Grenada
GE +4143+04449 Asia/Tbilisi
GF +0456-05220 America/Cayenne
GG +492717-0023210 Europe/Guernsey
GH +0533-00013 Africa/Accra
GI +3608-00521 Europe/Gibraltar
GL +6411-05144 America/Nuuk Greenland (most areas)
GL +7646-01840 America/Danmarkshavn National Park (east coast)
GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit
GL +7634-06847 America/Thule Thule/Pituffik
GM +1328-01639 Africa/Banjul
GN +0931-01343 Africa/Conakry
GP +1614-06132 America/Guadeloupe
GQ +0345+00847 Africa/Malabo
GR +3758+02343 Europe/Athens
GS -5416-03632 Atlantic/South_Georgia
GT +1438-09031 America/Guatemala
GU +1328+14445 Pacific/Guam
GW +1151-01535 Africa/Bissau
GY +0648-05810 America/Guyana
HK +2217+11409 Asia/Hong_Kong
HN +1406-08713 America/Tegucigalpa
HR +4548+01558 Europe/Zagreb
HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java, Sumatra
ID -0002+10920 Asia/Pontianak Borneo (west, central)
ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)
ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas
IE +5320-00615 Europe/Dublin
IL +314650+0351326 Asia/Jerusalem
IM +5409-00428 Europe/Isle_of_Man
IN +2232+08822 Asia/Kolkata
IO -0720+07225 Indian/Chagos
IQ +3321+04425 Asia/Baghdad
IR +3540+05126 Asia/Tehran
IS +6409-02151 Atlantic/Reykjavik
IT +4154+01229 Europe/Rome
JE +491101-0020624 Europe/Jersey
JM +175805-0764736 America/Jamaica
JO +3157+03556 Asia/Amman
JP +353916+1394441 Asia/Tokyo
KE -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
KH +1133+10455 Asia/Phnom_Penh
KI +0125+17300 Pacific/Tarawa Gilbert Islands
KI -0308-17105 Pacific/Enderbury Phoenix Islands
KI +0152-15720 Pacific/Kiritimati Line Islands
KM -1141+04316 Indian/Comoro
KN +1718-06243 America/St_Kitts
KP +3901+12545 Asia/Pyongyang
KR +3733+12658 Asia/Seoul
KW +2920+04759 Asia/Kuwait
KY +1918-08123 America/Cayman
KZ +4315+07657 Asia/Almaty Kazakhstan (most areas)
KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda
KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay
KZ +5017+05710 Asia/Aqtobe Aqtobe/Aktobe
KZ +4431+05016 Asia/Aqtau Mangghystau/Mankistau
KZ +4707+05156 Asia/Atyrau Atyrau/Atirau/Gur'yev
KZ +5113+05121 Asia/Oral West Kazakhstan
LA +1758+10236 Asia/Vientiane
LB +3353+03530 Asia/Beirut
LC +1401-06100 America/St_Lucia
LI +4709+00931 Europe/Vaduz
LK +0656+07951 Asia/Colombo
LR +0618-01047 Africa/Monrovia
LS -2928+02730 Africa/Maseru
LT +5441+02519 Europe/Vilnius
LU +4936+00609 Europe/Luxembourg
LV +5657+02406 Europe/Riga
LY +3254+01311 Africa/Tripoli
MA +3339-00735 Africa/Casablanca
MC +4342+00723 Europe/Monaco
MD +4700+02850 Europe/Chisinau
ME +4226+01916 Europe/Podgorica
MF +1804-06305 America/Marigot
MG -1855+04731 Indian/Antananarivo
MH +0709+17112 Pacific/Majuro Marshall Islands (most areas)
MH +0905+16720 Pacific/Kwajalein Kwajalein
MK +4159+02126 Europe/Skopje
ML +1239-00800 Africa/Bamako
MM +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas)
MN +4801+09139 Asia/Hovd Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan
MN +4804+11430 Asia/Choibalsan Dornod, Sukhbaatar
MO +221150+1133230 Asia/Macau
MP +1512+14545 Pacific/Saipan
MQ +1436-06105 America/Martinique
MR +1806-01557 Africa/Nouakchott
MS +1643-06213 America/Montserrat
MT +3554+01431 Europe/Malta
MU -2010+05730 Indian/Mauritius
MV +0410+07330 Indian/Maldives
MW -1547+03500 Africa/Blantyre
MX +1924-09909 America/Mexico_City Central Time
MX +2105-08646 America/Cancun Eastern Standard Time - Quintana Roo
MX +2058-08937 America/Merida Central Time - Campeche, Yucatan
MX +2540-10019 America/Monterrey Central Time - Durango; Coahuila, Nuevo Leon, Tamaulipas (most areas)
MX +2550-09730 America/Matamoros Central Time US - Coahuila, Nuevo Leon, Tamaulipas (US border)
MX +2313-10625 America/Mazatlan Mountain Time - Baja California Sur, Nayarit, Sinaloa
MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua (most areas)
MX +2934-10425 America/Ojinaga Mountain Time US - Chihuahua (US border)
MX +2904-11058 America/Hermosillo Mountain Standard Time - Sonora
MX +3232-11701 America/Tijuana Pacific Time US - Baja California
MX +2048-10515 America/Bahia_Banderas Central Time - Bahia de Banderas
MY +0310+10142 Asia/Kuala_Lumpur Malaysia (peninsula)
MY +0133+11020 Asia/Kuching Sabah, Sarawak
MZ -2558+03235 Africa/Maputo
NA -2234+01706 Africa/Windhoek
NC -2216+16627 Pacific/Noumea
NE +1331+00207 Africa/Niamey
NF -2903+16758 Pacific/Norfolk
NG +0627+00324 Africa/Lagos
NI +1209-08617 America/Managua
NL +5222+00454 Europe/Amsterdam
NO +5955+01045 Europe/Oslo
NP +2743+08519 Asia/Kathmandu
NR -0031+16655 Pacific/Nauru
NU -1901-16955 Pacific/Niue
NZ -3652+17446 Pacific/Auckland New Zealand (most areas)
NZ -4357-17633 Pacific/Chatham Chatham Islands
OM +2336+05835 Asia/Muscat
PA +0858-07932 America/Panama
PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas)
PG -0613+15534 Pacific/Bougainville Bougainville
PH +1435+12100 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
PN -2504-13005 Pacific/Pitcairn
PR +182806-0660622 America/Puerto_Rico
PS +3130+03428 Asia/Gaza Gaza Strip
PS +313200+0350542 Asia/Hebron West Bank
PT +3843-00908 Europe/Lisbon Portugal (mainland)
PT +3238-01654 Atlantic/Madeira Madeira Islands
PT +3744-02540 Atlantic/Azores Azores
PW +0720+13429 Pacific/Palau
PY -2516-05740 America/Asuncion
QA +2517+05132 Asia/Qatar
RE -2052+05528 Indian/Reunion
RO +4426+02606 Europe/Bucharest
RS +4450+02030 Europe/Belgrade
RU +5443+02030 Europe/Kaliningrad MSK-01 - Kaliningrad
RU +554521+0373704 Europe/Moscow MSK+00 - Moscow area
# The obsolescent zone.tab format cannot represent Europe/Simferopol well.
# Put it in RU section and list as UA. See "territorial claims" above.
# Programs should use zone1970.tab instead; see above.
UA +4457+03406 Europe/Simferopol Crimea
RU +5836+04939 Europe/Kirov MSK+00 - Kirov
RU +4621+04803 Europe/Astrakhan MSK+01 - Astrakhan
RU +4844+04425 Europe/Volgograd MSK+01 - Volgograd
RU +5134+04602 Europe/Saratov MSK+01 - Saratov
RU +5420+04824 Europe/Ulyanovsk MSK+01 - Ulyanovsk
RU +5312+05009 Europe/Samara MSK+01 - Samara, Udmurtia
RU +5651+06036 Asia/Yekaterinburg MSK+02 - Urals
RU +5500+07324 Asia/Omsk MSK+03 - Omsk
RU +5502+08255 Asia/Novosibirsk MSK+04 - Novosibirsk
RU +5322+08345 Asia/Barnaul MSK+04 - Altai
RU +5630+08458 Asia/Tomsk MSK+04 - Tomsk
RU +5345+08707 Asia/Novokuznetsk MSK+04 - Kemerovo
RU +5601+09250 Asia/Krasnoyarsk MSK+04 - Krasnoyarsk area
RU +5216+10420 Asia/Irkutsk MSK+05 - Irkutsk, Buryatia
RU +5203+11328 Asia/Chita MSK+06 - Zabaykalsky
RU +6200+12940 Asia/Yakutsk MSK+06 - Lena River
RU +623923+1353314 Asia/Khandyga MSK+06 - Tomponsky, Ust-Maysky
RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River
RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky
RU +5934+15048 Asia/Magadan MSK+08 - Magadan
RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island
RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is
RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka
RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea
RW -0157+03004 Africa/Kigali
SA +2438+04643 Asia/Riyadh
SB -0932+16012 Pacific/Guadalcanal
SC -0440+05528 Indian/Mahe
SD +1536+03232 Africa/Khartoum
SE +5920+01803 Europe/Stockholm
SG +0117+10351 Asia/Singapore
SH -1555-00542 Atlantic/St_Helena
SI +4603+01431 Europe/Ljubljana
SJ +7800+01600 Arctic/Longyearbyen
SK +4809+01707 Europe/Bratislava
SL +0830-01315 Africa/Freetown
SM +4355+01228 Europe/San_Marino
SN +1440-01726 Africa/Dakar
SO +0204+04522 Africa/Mogadishu
SR +0550-05510 America/Paramaribo
SS +0451+03137 Africa/Juba
ST +0020+00644 Africa/Sao_Tome
SV +1342-08912 America/El_Salvador
SX +180305-0630250 America/Lower_Princes
SY +3330+03618 Asia/Damascus
SZ -2618+03106 Africa/Mbabane
TC +2128-07108 America/Grand_Turk
TD +1207+01503 Africa/Ndjamena
TF -492110+0701303 Indian/Kerguelen
TG +0608+00113 Africa/Lome
TH +1345+10031 Asia/Bangkok
TJ +3835+06848 Asia/Dushanbe
TK -0922-17114 Pacific/Fakaofo
TL -0833+12535 Asia/Dili
TM +3757+05823 Asia/Ashgabat
TN +3648+01011 Africa/Tunis
TO -2110-17510 Pacific/Tongatapu
TR +4101+02858 Europe/Istanbul
TT +1039-06131 America/Port_of_Spain
TV -0831+17913 Pacific/Funafuti
TW +2503+12130 Asia/Taipei
TZ -0648+03917 Africa/Dar_es_Salaam
UA +5026+03031 Europe/Kiev Ukraine (most areas)
UA +4837+02218 Europe/Uzhgorod Transcarpathia
UA +4750+03510 Europe/Zaporozhye Zaporozhye and east Lugansk
UG +0019+03225 Africa/Kampala
UM +2813-17722 Pacific/Midway Midway Islands
UM +1917+16637 Pacific/Wake Wake Island
US +404251-0740023 America/New_York Eastern (most areas)
US +421953-0830245 America/Detroit Eastern - MI (most areas)
US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area)
US +364947-0845057 America/Kentucky/Monticello Eastern - KY (Wayne)
US +394606-0860929 America/Indiana/Indianapolis Eastern - IN (most areas)
US +384038-0873143 America/Indiana/Vincennes Eastern - IN (Da, Du, K, Mn)
US +410305-0863611 America/Indiana/Winamac Eastern - IN (Pulaski)
US +382232-0862041 America/Indiana/Marengo Eastern - IN (Crawford)
US +382931-0871643 America/Indiana/Petersburg Eastern - IN (Pike)
US +384452-0850402 America/Indiana/Vevay Eastern - IN (Switzerland)
US +415100-0873900 America/Chicago Central (most areas)
US +375711-0864541 America/Indiana/Tell_City Central - IN (Perry)
US +411745-0863730 America/Indiana/Knox Central - IN (Starke)
US +450628-0873651 America/Menominee Central - MI (Wisconsin border)
US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver)
US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural)
US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer)
US +394421-1045903 America/Denver Mountain (most areas)
US +433649-1161209 America/Boise Mountain - ID (south); OR (east)
US +332654-1120424 America/Phoenix MST - Arizona (except Navajo)
US +340308-1181434 America/Los_Angeles Pacific
US +611305-1495401 America/Anchorage Alaska (most areas)
US +581807-1342511 America/Juneau Alaska - Juneau area
US +571035-1351807 America/Sitka Alaska - Sitka area
US +550737-1313435 America/Metlakatla Alaska - Annette Island
US +593249-1394338 America/Yakutat Alaska - Yakutat
US +643004-1652423 America/Nome Alaska (west)
US +515248-1763929 America/Adak Aleutian Islands
US +211825-1575130 Pacific/Honolulu Hawaii
UY -345433-0561245 America/Montevideo
UZ +3940+06648 Asia/Samarkand Uzbekistan (west)
UZ +4120+06918 Asia/Tashkent Uzbekistan (east)
VA +415408+0122711 Europe/Vatican
VC +1309-06114 America/St_Vincent
VE +1030-06656 America/Caracas
VG +1827-06437 America/Tortola
VI +1821-06456 America/St_Thomas
VN +1045+10640 Asia/Ho_Chi_Minh
VU -1740+16825 Pacific/Efate
WF -1318-17610 Pacific/Wallis
WS -1350-17144 Pacific/Apia
YE +1245+04512 Asia/Aden
YT -1247+04514 Indian/Mayotte
ZA -2615+02800 Africa/Johannesburg
ZM -1525+02817 Africa/Lusaka
ZW -1750+03103 Africa/Harare
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/UCT
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/GMT+0
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/tzdata.zi
|
# version unknown
# This zic input file is in the public domain.
R d 1916 o - Jun 14 23s 1 S
R d 1916 1919 - O Su>=1 23s 0 -
R d 1917 o - Mar 24 23s 1 S
R d 1918 o - Mar 9 23s 1 S
R d 1919 o - Mar 1 23s 1 S
R d 1920 o - F 14 23s 1 S
R d 1920 o - O 23 23s 0 -
R d 1921 o - Mar 14 23s 1 S
R d 1921 o - Jun 21 23s 0 -
R d 1939 o - S 11 23s 1 S
R d 1939 o - N 19 1 0 -
R d 1944 1945 - Ap M>=1 2 1 S
R d 1944 o - O 8 2 0 -
R d 1945 o - S 16 1 0 -
R d 1971 o - Ap 25 23s 1 S
R d 1971 o - S 26 23s 0 -
R d 1977 o - May 6 0 1 S
R d 1977 o - O 21 0 0 -
R d 1978 o - Mar 24 1 1 S
R d 1978 o - S 22 3 0 -
R d 1980 o - Ap 25 0 1 S
R d 1980 o - O 31 2 0 -
Z Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:1
0:9:21 - PMT 1911 Mar 11
0 d WE%sT 1940 F 25 2
1 d CE%sT 1946 O 7
0 - WET 1956 Ja 29
1 - CET 1963 Ap 14
0 d WE%sT 1977 O 21
1 d CE%sT 1979 O 26
0 d WE%sT 1981 May
1 - CET
Z Atlantic/Cape_Verde -1:34:4 - LMT 1912 Ja 1 2u
-2 - -02 1942 S
-2 1 -01 1945 O 15
-2 - -02 1975 N 25 2
-1 - -01
Z Africa/Ndjamena 1:0:12 - LMT 1912
1 - WAT 1979 O 14
1 1 WAST 1980 Mar 8
1 - WAT
Z Africa/Abidjan -0:16:8 - LMT 1912
0 - GMT
L Africa/Abidjan Africa/Bamako
L Africa/Abidjan Africa/Banjul
L Africa/Abidjan Africa/Conakry
L Africa/Abidjan Africa/Dakar
L Africa/Abidjan Africa/Freetown
L Africa/Abidjan Africa/Lome
L Africa/Abidjan Africa/Nouakchott
L Africa/Abidjan Africa/Ouagadougou
L Africa/Abidjan Atlantic/St_Helena
R K 1940 o - Jul 15 0 1 S
R K 1940 o - O 1 0 0 -
R K 1941 o - Ap 15 0 1 S
R K 1941 o - S 16 0 0 -
R K 1942 1944 - Ap 1 0 1 S
R K 1942 o - O 27 0 0 -
R K 1943 1945 - N 1 0 0 -
R K 1945 o - Ap 16 0 1 S
R K 1957 o - May 10 0 1 S
R K 1957 1958 - O 1 0 0 -
R K 1958 o - May 1 0 1 S
R K 1959 1981 - May 1 1 1 S
R K 1959 1965 - S 30 3 0 -
R K 1966 1994 - O 1 3 0 -
R K 1982 o - Jul 25 1 1 S
R K 1983 o - Jul 12 1 1 S
R K 1984 1988 - May 1 1 1 S
R K 1989 o - May 6 1 1 S
R K 1990 1994 - May 1 1 1 S
R K 1995 2010 - Ap lastF 0s 1 S
R K 1995 2005 - S lastTh 24 0 -
R K 2006 o - S 21 24 0 -
R K 2007 o - S Th>=1 24 0 -
R K 2008 o - Au lastTh 24 0 -
R K 2009 o - Au 20 24 0 -
R K 2010 o - Au 10 24 0 -
R K 2010 o - S 9 24 1 S
R K 2010 o - S lastTh 24 0 -
R K 2014 o - May 15 24 1 S
R K 2014 o - Jun 26 24 0 -
R K 2014 o - Jul 31 24 1 S
R K 2014 o - S lastTh 24 0 -
Z Africa/Cairo 2:5:9 - LMT 1900 O
2 K EE%sT
R GH 1920 1942 - S 1 0 0:20 -
R GH 1920 1942 - D 31 0 0 -
Z Africa/Accra -0:0:52 - LMT 1918
0 GH GMT/+0020
Z Africa/Bissau -1:2:20 - LMT 1912 Ja 1 1u
-1 - -01 1975
0 - GMT
Z Africa/Nairobi 2:27:16 - LMT 1928 Jul
3 - EAT 1930
2:30 - +0230 1940
2:45 - +0245 1960
3 - EAT
L Africa/Nairobi Africa/Addis_Ababa
L Africa/Nairobi Africa/Asmara
L Africa/Nairobi Africa/Dar_es_Salaam
L Africa/Nairobi Africa/Djibouti
L Africa/Nairobi Africa/Kampala
L Africa/Nairobi Africa/Mogadishu
L Africa/Nairobi Indian/Antananarivo
L Africa/Nairobi Indian/Comoro
L Africa/Nairobi Indian/Mayotte
Z Africa/Monrovia -0:43:8 - LMT 1882
-0:43:8 - MMT 1919 Mar
-0:44:30 - MMT 1972 Ja 7
0 - GMT
R L 1951 o - O 14 2 1 S
R L 1952 o - Ja 1 0 0 -
R L 1953 o - O 9 2 1 S
R L 1954 o - Ja 1 0 0 -
R L 1955 o - S 30 0 1 S
R L 1956 o - Ja 1 0 0 -
R L 1982 1984 - Ap 1 0 1 S
R L 1982 1985 - O 1 0 0 -
R L 1985 o - Ap 6 0 1 S
R L 1986 o - Ap 4 0 1 S
R L 1986 o - O 3 0 0 -
R L 1987 1989 - Ap 1 0 1 S
R L 1987 1989 - O 1 0 0 -
R L 1997 o - Ap 4 0 1 S
R L 1997 o - O 4 0 0 -
R L 2013 o - Mar lastF 1 1 S
R L 2013 o - O lastF 2 0 -
Z Africa/Tripoli 0:52:44 - LMT 1920
1 L CE%sT 1959
2 - EET 1982
1 L CE%sT 1990 May 4
2 - EET 1996 S 30
1 L CE%sT 1997 O 4
2 - EET 2012 N 10 2
1 L CE%sT 2013 O 25 2
2 - EET
R MU 1982 o - O 10 0 1 -
R MU 1983 o - Mar 21 0 0 -
R MU 2008 o - O lastSu 2 1 -
R MU 2009 o - Mar lastSu 2 0 -
Z Indian/Mauritius 3:50 - LMT 1907
4 MU +04/+05
R M 1939 o - S 12 0 1 -
R M 1939 o - N 19 0 0 -
R M 1940 o - F 25 0 1 -
R M 1945 o - N 18 0 0 -
R M 1950 o - Jun 11 0 1 -
R M 1950 o - O 29 0 0 -
R M 1967 o - Jun 3 12 1 -
R M 1967 o - O 1 0 0 -
R M 1974 o - Jun 24 0 1 -
R M 1974 o - S 1 0 0 -
R M 1976 1977 - May 1 0 1 -
R M 1976 o - Au 1 0 0 -
R M 1977 o - S 28 0 0 -
R M 1978 o - Jun 1 0 1 -
R M 1978 o - Au 4 0 0 -
R M 2008 o - Jun 1 0 1 -
R M 2008 o - S 1 0 0 -
R M 2009 o - Jun 1 0 1 -
R M 2009 o - Au 21 0 0 -
R M 2010 o - May 2 0 1 -
R M 2010 o - Au 8 0 0 -
R M 2011 o - Ap 3 0 1 -
R M 2011 o - Jul 31 0 0 -
R M 2012 2013 - Ap lastSu 2 1 -
R M 2012 o - Jul 20 3 0 -
R M 2012 o - Au 20 2 1 -
R M 2012 o - S 30 3 0 -
R M 2013 o - Jul 7 3 0 -
R M 2013 o - Au 10 2 1 -
R M 2013 2018 - O lastSu 3 0 -
R M 2014 2018 - Mar lastSu 2 1 -
R M 2014 o - Jun 28 3 0 -
R M 2014 o - Au 2 2 1 -
R M 2015 o - Jun 14 3 0 -
R M 2015 o - Jul 19 2 1 -
R M 2016 o - Jun 5 3 0 -
R M 2016 o - Jul 10 2 1 -
R M 2017 o - May 21 3 0 -
R M 2017 o - Jul 2 2 1 -
R M 2018 o - May 13 3 0 -
R M 2018 o - Jun 17 2 1 -
R M 2019 o - May 5 3 -1 -
R M 2019 o - Jun 9 2 0 -
R M 2020 o - Ap 19 3 -1 -
R M 2020 o - May 31 2 0 -
R M 2021 o - Ap 11 3 -1 -
R M 2021 o - May 16 2 0 -
R M 2022 o - Mar 27 3 -1 -
R M 2022 o - May 8 2 0 -
R M 2023 o - Mar 19 3 -1 -
R M 2023 o - Ap 23 2 0 -
R M 2024 o - Mar 10 3 -1 -
R M 2024 o - Ap 14 2 0 -
R M 2025 o - F 23 3 -1 -
R M 2025 o - Ap 6 2 0 -
R M 2026 o - F 15 3 -1 -
R M 2026 o - Mar 22 2 0 -
R M 2027 o - F 7 3 -1 -
R M 2027 o - Mar 14 2 0 -
R M 2028 o - Ja 23 3 -1 -
R M 2028 o - Mar 5 2 0 -
R M 2029 o - Ja 14 3 -1 -
R M 2029 o - F 18 2 0 -
R M 2029 o - D 30 3 -1 -
R M 2030 o - F 10 2 0 -
R M 2030 o - D 22 3 -1 -
R M 2031 o - Ja 26 2 0 -
R M 2031 o - D 14 3 -1 -
R M 2032 o - Ja 18 2 0 -
R M 2032 o - N 28 3 -1 -
R M 2033 o - Ja 9 2 0 -
R M 2033 o - N 20 3 -1 -
R M 2033 o - D 25 2 0 -
R M 2034 o - N 5 3 -1 -
R M 2034 o - D 17 2 0 -
R M 2035 o - O 28 3 -1 -
R M 2035 o - D 9 2 0 -
R M 2036 o - O 19 3 -1 -
R M 2036 o - N 23 2 0 -
R M 2037 o - O 4 3 -1 -
R M 2037 o - N 15 2 0 -
R M 2038 o - S 26 3 -1 -
R M 2038 o - O 31 2 0 -
R M 2039 o - S 18 3 -1 -
R M 2039 o - O 23 2 0 -
R M 2040 o - S 2 3 -1 -
R M 2040 o - O 14 2 0 -
R M 2041 o - Au 25 3 -1 -
R M 2041 o - S 29 2 0 -
R M 2042 o - Au 10 3 -1 -
R M 2042 o - S 21 2 0 -
R M 2043 o - Au 2 3 -1 -
R M 2043 o - S 13 2 0 -
R M 2044 o - Jul 24 3 -1 -
R M 2044 o - Au 28 2 0 -
R M 2045 o - Jul 9 3 -1 -
R M 2045 o - Au 20 2 0 -
R M 2046 o - Jul 1 3 -1 -
R M 2046 o - Au 5 2 0 -
R M 2047 o - Jun 23 3 -1 -
R M 2047 o - Jul 28 2 0 -
R M 2048 o - Jun 7 3 -1 -
R M 2048 o - Jul 19 2 0 -
R M 2049 o - May 30 3 -1 -
R M 2049 o - Jul 4 2 0 -
R M 2050 o - May 15 3 -1 -
R M 2050 o - Jun 26 2 0 -
R M 2051 o - May 7 3 -1 -
R M 2051 o - Jun 18 2 0 -
R M 2052 o - Ap 28 3 -1 -
R M 2052 o - Jun 2 2 0 -
R M 2053 o - Ap 13 3 -1 -
R M 2053 o - May 25 2 0 -
R M 2054 o - Ap 5 3 -1 -
R M 2054 o - May 10 2 0 -
R M 2055 o - Mar 28 3 -1 -
R M 2055 o - May 2 2 0 -
R M 2056 o - Mar 12 3 -1 -
R M 2056 o - Ap 23 2 0 -
R M 2057 o - Mar 4 3 -1 -
R M 2057 o - Ap 8 2 0 -
R M 2058 o - F 17 3 -1 -
R M 2058 o - Mar 31 2 0 -
R M 2059 o - F 9 3 -1 -
R M 2059 o - Mar 23 2 0 -
R M 2060 o - F 1 3 -1 -
R M 2060 o - Mar 7 2 0 -
R M 2061 o - Ja 16 3 -1 -
R M 2061 o - F 27 2 0 -
R M 2062 o - Ja 8 3 -1 -
R M 2062 o - F 12 2 0 -
R M 2062 o - D 31 3 -1 -
R M 2063 o - F 4 2 0 -
R M 2063 o - D 16 3 -1 -
R M 2064 o - Ja 27 2 0 -
R M 2064 o - D 7 3 -1 -
R M 2065 o - Ja 11 2 0 -
R M 2065 o - N 22 3 -1 -
R M 2066 o - Ja 3 2 0 -
R M 2066 o - N 14 3 -1 -
R M 2066 o - D 26 2 0 -
R M 2067 o - N 6 3 -1 -
R M 2067 o - D 11 2 0 -
R M 2068 o - O 21 3 -1 -
R M 2068 o - D 2 2 0 -
R M 2069 o - O 13 3 -1 -
R M 2069 o - N 17 2 0 -
R M 2070 o - O 5 3 -1 -
R M 2070 o - N 9 2 0 -
R M 2071 o - S 20 3 -1 -
R M 2071 o - N 1 2 0 -
R M 2072 o - S 11 3 -1 -
R M 2072 o - O 16 2 0 -
R M 2073 o - Au 27 3 -1 -
R M 2073 o - O 8 2 0 -
R M 2074 o - Au 19 3 -1 -
R M 2074 o - S 30 2 0 -
R M 2075 o - Au 11 3 -1 -
R M 2075 o - S 15 2 0 -
R M 2076 o - Jul 26 3 -1 -
R M 2076 o - S 6 2 0 -
R M 2077 o - Jul 18 3 -1 -
R M 2077 o - Au 22 2 0 -
R M 2078 o - Jul 10 3 -1 -
R M 2078 o - Au 14 2 0 -
R M 2079 o - Jun 25 3 -1 -
R M 2079 o - Au 6 2 0 -
R M 2080 o - Jun 16 3 -1 -
R M 2080 o - Jul 21 2 0 -
R M 2081 o - Jun 1 3 -1 -
R M 2081 o - Jul 13 2 0 -
R M 2082 o - May 24 3 -1 -
R M 2082 o - Jun 28 2 0 -
R M 2083 o - May 16 3 -1 -
R M 2083 o - Jun 20 2 0 -
R M 2084 o - Ap 30 3 -1 -
R M 2084 o - Jun 11 2 0 -
R M 2085 o - Ap 22 3 -1 -
R M 2085 o - May 27 2 0 -
R M 2086 o - Ap 14 3 -1 -
R M 2086 o - May 19 2 0 -
R M 2087 o - Mar 30 3 -1 -
R M 2087 o - May 11 2 0 -
Z Africa/Casablanca -0:30:20 - LMT 1913 O 26
0 M +00/+01 1984 Mar 16
1 - +01 1986
0 M +00/+01 2018 O 28 3
1 M +01/+00
Z Africa/El_Aaiun -0:52:48 - LMT 1934
-1 - -01 1976 Ap 14
0 M +00/+01 2018 O 28 3
1 M +01/+00
Z Africa/Maputo 2:10:20 - LMT 1903 Mar
2 - CAT
L Africa/Maputo Africa/Blantyre
L Africa/Maputo Africa/Bujumbura
L Africa/Maputo Africa/Gaborone
L Africa/Maputo Africa/Harare
L Africa/Maputo Africa/Kigali
L Africa/Maputo Africa/Lubumbashi
L Africa/Maputo Africa/Lusaka
R NA 1994 o - Mar 21 0 -1 WAT
R NA 1994 2017 - S Su>=1 2 0 CAT
R NA 1995 2017 - Ap Su>=1 2 -1 WAT
Z Africa/Windhoek 1:8:24 - LMT 1892 F 8
1:30 - +0130 1903 Mar
2 - SAST 1942 S 20 2
2 1 SAST 1943 Mar 21 2
2 - SAST 1990 Mar 21
2 NA %s
Z Africa/Lagos 0:13:36 - LMT 1919 S
1 - WAT
L Africa/Lagos Africa/Bangui
L Africa/Lagos Africa/Brazzaville
L Africa/Lagos Africa/Douala
L Africa/Lagos Africa/Kinshasa
L Africa/Lagos Africa/Libreville
L Africa/Lagos Africa/Luanda
L Africa/Lagos Africa/Malabo
L Africa/Lagos Africa/Niamey
L Africa/Lagos Africa/Porto-Novo
Z Indian/Reunion 3:41:52 - LMT 1911 Jun
4 - +04
Z Africa/Sao_Tome 0:26:56 - LMT 1884
-0:36:45 - LMT 1912 Ja 1 0u
0 - GMT 2018 Ja 1 1
1 - WAT 2019 Ja 1 2
0 - GMT
Z Indian/Mahe 3:41:48 - LMT 1906 Jun
4 - +04
R SA 1942 1943 - S Su>=15 2 1 -
R SA 1943 1944 - Mar Su>=15 2 0 -
Z Africa/Johannesburg 1:52 - LMT 1892 F 8
1:30 - SAST 1903 Mar
2 SA SAST
L Africa/Johannesburg Africa/Maseru
L Africa/Johannesburg Africa/Mbabane
R SD 1970 o - May 1 0 1 S
R SD 1970 1985 - O 15 0 0 -
R SD 1971 o - Ap 30 0 1 S
R SD 1972 1985 - Ap lastSu 0 1 S
Z Africa/Khartoum 2:10:8 - LMT 1931
2 SD CA%sT 2000 Ja 15 12
3 - EAT 2017 N
2 - CAT
Z Africa/Juba 2:6:28 - LMT 1931
2 SD CA%sT 2000 Ja 15 12
3 - EAT
R n 1939 o - Ap 15 23s 1 S
R n 1939 o - N 18 23s 0 -
R n 1940 o - F 25 23s 1 S
R n 1941 o - O 6 0 0 -
R n 1942 o - Mar 9 0 1 S
R n 1942 o - N 2 3 0 -
R n 1943 o - Mar 29 2 1 S
R n 1943 o - Ap 17 2 0 -
R n 1943 o - Ap 25 2 1 S
R n 1943 o - O 4 2 0 -
R n 1944 1945 - Ap M>=1 2 1 S
R n 1944 o - O 8 0 0 -
R n 1945 o - S 16 0 0 -
R n 1977 o - Ap 30 0s 1 S
R n 1977 o - S 24 0s 0 -
R n 1978 o - May 1 0s 1 S
R n 1978 o - O 1 0s 0 -
R n 1988 o - Jun 1 0s 1 S
R n 1988 1990 - S lastSu 0s 0 -
R n 1989 o - Mar 26 0s 1 S
R n 1990 o - May 1 0s 1 S
R n 2005 o - May 1 0s 1 S
R n 2005 o - S 30 1s 0 -
R n 2006 2008 - Mar lastSu 2s 1 S
R n 2006 2008 - O lastSu 2s 0 -
Z Africa/Tunis 0:40:44 - LMT 1881 May 12
0:9:21 - PMT 1911 Mar 11
1 n CE%sT
Z Antarctica/Casey 0 - -00 1969
8 - +08 2009 O 18 2
11 - +11 2010 Mar 5 2
8 - +08 2011 O 28 2
11 - +11 2012 F 21 17u
8 - +08 2016 O 22
11 - +11 2018 Mar 11 4
8 - +08
Z Antarctica/Davis 0 - -00 1957 Ja 13
7 - +07 1964 N
0 - -00 1969 F
7 - +07 2009 O 18 2
5 - +05 2010 Mar 10 20u
7 - +07 2011 O 28 2
5 - +05 2012 F 21 20u
7 - +07
Z Antarctica/Mawson 0 - -00 1954 F 13
6 - +06 2009 O 18 2
5 - +05
Z Indian/Kerguelen 0 - -00 1950
5 - +05
Z Antarctica/DumontDUrville 0 - -00 1947
10 - +10 1952 Ja 14
0 - -00 1956 N
10 - +10
Z Antarctica/Syowa 0 - -00 1957 Ja 29
3 - +03
R Tr 2005 ma - Mar lastSu 1u 2 +02
R Tr 2004 ma - O lastSu 1u 0 +00
Z Antarctica/Troll 0 - -00 2005 F 12
0 Tr %s
Z Antarctica/Vostok 0 - -00 1957 D 16
6 - +06
Z Antarctica/Rothera 0 - -00 1976 D
-3 - -03
Z Asia/Kabul 4:36:48 - LMT 1890
4 - +04 1945
4:30 - +0430
R AM 2011 o - Mar lastSu 2s 1 -
R AM 2011 o - O lastSu 2s 0 -
Z Asia/Yerevan 2:58 - LMT 1924 May 2
3 - +03 1957 Mar
4 R +04/+05 1991 Mar 31 2s
3 R +03/+04 1995 S 24 2s
4 - +04 1997
4 R +04/+05 2011
4 AM +04/+05
R AZ 1997 2015 - Mar lastSu 4 1 -
R AZ 1997 2015 - O lastSu 5 0 -
Z Asia/Baku 3:19:24 - LMT 1924 May 2
3 - +03 1957 Mar
4 R +04/+05 1991 Mar 31 2s
3 R +03/+04 1992 S lastSu 2s
4 - +04 1996
4 E +04/+05 1997
4 AZ +04/+05
R BD 2009 o - Jun 19 23 1 -
R BD 2009 o - D 31 24 0 -
Z Asia/Dhaka 6:1:40 - LMT 1890
5:53:20 - HMT 1941 O
6:30 - +0630 1942 May 15
5:30 - +0530 1942 S
6:30 - +0630 1951 S 30
6 - +06 2009
6 BD +06/+07
Z Asia/Thimphu 5:58:36 - LMT 1947 Au 15
5:30 - +0530 1987 O
6 - +06
Z Indian/Chagos 4:49:40 - LMT 1907
5 - +05 1996
6 - +06
Z Asia/Brunei 7:39:40 - LMT 1926 Mar
7:30 - +0730 1933
8 - +08
Z Asia/Yangon 6:24:47 - LMT 1880
6:24:47 - RMT 1920
6:30 - +0630 1942 May
9 - +09 1945 May 3
6:30 - +0630
R Sh 1919 o - Ap 12 24 1 D
R Sh 1919 o - S 30 24 0 S
R Sh 1940 o - Jun 1 0 1 D
R Sh 1940 o - O 12 24 0 S
R Sh 1941 o - Mar 15 0 1 D
R Sh 1941 o - N 1 24 0 S
R Sh 1942 o - Ja 31 0 1 D
R Sh 1945 o - S 1 24 0 S
R Sh 1946 o - May 15 0 1 D
R Sh 1946 o - S 30 24 0 S
R Sh 1947 o - Ap 15 0 1 D
R Sh 1947 o - O 31 24 0 S
R Sh 1948 1949 - May 1 0 1 D
R Sh 1948 1949 - S 30 24 0 S
R CN 1986 o - May 4 2 1 D
R CN 1986 1991 - S Su>=11 2 0 S
R CN 1987 1991 - Ap Su>=11 2 1 D
Z Asia/Shanghai 8:5:43 - LMT 1901
8 Sh C%sT 1949 May 28
8 CN C%sT
Z Asia/Urumqi 5:50:20 - LMT 1928
6 - +06
R HK 1946 o - Ap 21 0 1 S
R HK 1946 o - D 1 3:30s 0 -
R HK 1947 o - Ap 13 3:30s 1 S
R HK 1947 o - N 30 3:30s 0 -
R HK 1948 o - May 2 3:30s 1 S
R HK 1948 1952 - O Su>=28 3:30s 0 -
R HK 1949 1953 - Ap Su>=1 3:30 1 S
R HK 1953 1964 - O Su>=31 3:30 0 -
R HK 1954 1964 - Mar Su>=18 3:30 1 S
R HK 1965 1976 - Ap Su>=16 3:30 1 S
R HK 1965 1976 - O Su>=16 3:30 0 -
R HK 1973 o - D 30 3:30 1 S
R HK 1979 o - May 13 3:30 1 S
R HK 1979 o - O 21 3:30 0 -
Z Asia/Hong_Kong 7:36:42 - LMT 1904 O 30 0:36:42
8 - HKT 1941 Jun 15 3
8 1 HKST 1941 O 1 4
8 0:30 HKWT 1941 D 25
9 - JST 1945 N 18 2
8 HK HK%sT
R f 1946 o - May 15 0 1 D
R f 1946 o - O 1 0 0 S
R f 1947 o - Ap 15 0 1 D
R f 1947 o - N 1 0 0 S
R f 1948 1951 - May 1 0 1 D
R f 1948 1951 - O 1 0 0 S
R f 1952 o - Mar 1 0 1 D
R f 1952 1954 - N 1 0 0 S
R f 1953 1959 - Ap 1 0 1 D
R f 1955 1961 - O 1 0 0 S
R f 1960 1961 - Jun 1 0 1 D
R f 1974 1975 - Ap 1 0 1 D
R f 1974 1975 - O 1 0 0 S
R f 1979 o - Jul 1 0 1 D
R f 1979 o - O 1 0 0 S
Z Asia/Taipei 8:6 - LMT 1896
8 - CST 1937 O
9 - JST 1945 S 21 1
8 f C%sT
R _ 1942 1943 - Ap 30 23 1 -
R _ 1942 o - N 17 23 0 -
R _ 1943 o - S 30 23 0 S
R _ 1946 o - Ap 30 23s 1 D
R _ 1946 o - S 30 23s 0 S
R _ 1947 o - Ap 19 23s 1 D
R _ 1947 o - N 30 23s 0 S
R _ 1948 o - May 2 23s 1 D
R _ 1948 o - O 31 23s 0 S
R _ 1949 1950 - Ap Sa>=1 23s 1 D
R _ 1949 1950 - O lastSa 23s 0 S
R _ 1951 o - Mar 31 23s 1 D
R _ 1951 o - O 28 23s 0 S
R _ 1952 1953 - Ap Sa>=1 23s 1 D
R _ 1952 o - N 1 23s 0 S
R _ 1953 1954 - O lastSa 23s 0 S
R _ 1954 1956 - Mar Sa>=17 23s 1 D
R _ 1955 o - N 5 23s 0 S
R _ 1956 1964 - N Su>=1 3:30 0 S
R _ 1957 1964 - Mar Su>=18 3:30 1 D
R _ 1965 1973 - Ap Su>=16 3:30 1 D
R _ 1965 1966 - O Su>=16 2:30 0 S
R _ 1967 1976 - O Su>=16 3:30 0 S
R _ 1973 o - D 30 3:30 1 D
R _ 1975 1976 - Ap Su>=16 3:30 1 D
R _ 1979 o - May 13 3:30 1 D
R _ 1979 o - O Su>=16 3:30 0 S
Z Asia/Macau 7:34:10 - LMT 1904 O 30
8 - CST 1941 D 21 23
9 _ +09/+10 1945 S 30 24
8 _ C%sT
R CY 1975 o - Ap 13 0 1 S
R CY 1975 o - O 12 0 0 -
R CY 1976 o - May 15 0 1 S
R CY 1976 o - O 11 0 0 -
R CY 1977 1980 - Ap Su>=1 0 1 S
R CY 1977 o - S 25 0 0 -
R CY 1978 o - O 2 0 0 -
R CY 1979 1997 - S lastSu 0 0 -
R CY 1981 1998 - Mar lastSu 0 1 S
Z Asia/Nicosia 2:13:28 - LMT 1921 N 14
2 CY EE%sT 1998 S
2 E EE%sT
Z Asia/Famagusta 2:15:48 - LMT 1921 N 14
2 CY EE%sT 1998 S
2 E EE%sT 2016 S 8
3 - +03 2017 O 29 1u
2 E EE%sT
L Asia/Nicosia Europe/Nicosia
Z Asia/Tbilisi 2:59:11 - LMT 1880
2:59:11 - TBMT 1924 May 2
3 - +03 1957 Mar
4 R +04/+05 1991 Mar 31 2s
3 R +03/+04 1992
3 e +03/+04 1994 S lastSu
4 e +04/+05 1996 O lastSu
4 1 +05 1997 Mar lastSu
4 e +04/+05 2004 Jun 27
3 R +03/+04 2005 Mar lastSu 2
4 - +04
Z Asia/Dili 8:22:20 - LMT 1912
8 - +08 1942 F 21 23
9 - +09 1976 May 3
8 - +08 2000 S 17
9 - +09
Z Asia/Kolkata 5:53:28 - LMT 1854 Jun 28
5:53:20 - HMT 1870
5:21:10 - MMT 1906
5:30 - IST 1941 O
5:30 1 +0630 1942 May 15
5:30 - IST 1942 S
5:30 1 +0630 1945 O 15
5:30 - IST
Z Asia/Jakarta 7:7:12 - LMT 1867 Au 10
7:7:12 - BMT 1923 D 31 23:47:12
7:20 - +0720 1932 N
7:30 - +0730 1942 Mar 23
9 - +09 1945 S 23
7:30 - +0730 1948 May
8 - +08 1950 May
7:30 - +0730 1964
7 - WIB
Z Asia/Pontianak 7:17:20 - LMT 1908 May
7:17:20 - PMT 1932 N
7:30 - +0730 1942 Ja 29
9 - +09 1945 S 23
7:30 - +0730 1948 May
8 - +08 1950 May
7:30 - +0730 1964
8 - WITA 1988
7 - WIB
Z Asia/Makassar 7:57:36 - LMT 1920
7:57:36 - MMT 1932 N
8 - +08 1942 F 9
9 - +09 1945 S 23
8 - WITA
Z Asia/Jayapura 9:22:48 - LMT 1932 N
9 - +09 1944 S
9:30 - +0930 1964
9 - WIT
R i 1978 1980 - Mar 20 24 1 -
R i 1978 o - O 20 24 0 -
R i 1979 o - S 18 24 0 -
R i 1980 o - S 22 24 0 -
R i 1991 o - May 2 24 1 -
R i 1992 1995 - Mar 21 24 1 -
R i 1991 1995 - S 21 24 0 -
R i 1996 o - Mar 20 24 1 -
R i 1996 o - S 20 24 0 -
R i 1997 1999 - Mar 21 24 1 -
R i 1997 1999 - S 21 24 0 -
R i 2000 o - Mar 20 24 1 -
R i 2000 o - S 20 24 0 -
R i 2001 2003 - Mar 21 24 1 -
R i 2001 2003 - S 21 24 0 -
R i 2004 o - Mar 20 24 1 -
R i 2004 o - S 20 24 0 -
R i 2005 o - Mar 21 24 1 -
R i 2005 o - S 21 24 0 -
R i 2008 o - Mar 20 24 1 -
R i 2008 o - S 20 24 0 -
R i 2009 2011 - Mar 21 24 1 -
R i 2009 2011 - S 21 24 0 -
R i 2012 o - Mar 20 24 1 -
R i 2012 o - S 20 24 0 -
R i 2013 2015 - Mar 21 24 1 -
R i 2013 2015 - S 21 24 0 -
R i 2016 o - Mar 20 24 1 -
R i 2016 o - S 20 24 0 -
R i 2017 2019 - Mar 21 24 1 -
R i 2017 2019 - S 21 24 0 -
R i 2020 o - Mar 20 24 1 -
R i 2020 o - S 20 24 0 -
R i 2021 2023 - Mar 21 24 1 -
R i 2021 2023 - S 21 24 0 -
R i 2024 o - Mar 20 24 1 -
R i 2024 o - S 20 24 0 -
R i 2025 2027 - Mar 21 24 1 -
R i 2025 2027 - S 21 24 0 -
R i 2028 2029 - Mar 20 24 1 -
R i 2028 2029 - S 20 24 0 -
R i 2030 2031 - Mar 21 24 1 -
R i 2030 2031 - S 21 24 0 -
R i 2032 2033 - Mar 20 24 1 -
R i 2032 2033 - S 20 24 0 -
R i 2034 2035 - Mar 21 24 1 -
R i 2034 2035 - S 21 24 0 -
R i 2036 2037 - Mar 20 24 1 -
R i 2036 2037 - S 20 24 0 -
R i 2038 2039 - Mar 21 24 1 -
R i 2038 2039 - S 21 24 0 -
R i 2040 2041 - Mar 20 24 1 -
R i 2040 2041 - S 20 24 0 -
R i 2042 2043 - Mar 21 24 1 -
R i 2042 2043 - S 21 24 0 -
R i 2044 2045 - Mar 20 24 1 -
R i 2044 2045 - S 20 24 0 -
R i 2046 2047 - Mar 21 24 1 -
R i 2046 2047 - S 21 24 0 -
R i 2048 2049 - Mar 20 24 1 -
R i 2048 2049 - S 20 24 0 -
R i 2050 2051 - Mar 21 24 1 -
R i 2050 2051 - S 21 24 0 -
R i 2052 2053 - Mar 20 24 1 -
R i 2052 2053 - S 20 24 0 -
R i 2054 2055 - Mar 21 24 1 -
R i 2054 2055 - S 21 24 0 -
R i 2056 2057 - Mar 20 24 1 -
R i 2056 2057 - S 20 24 0 -
R i 2058 2059 - Mar 21 24 1 -
R i 2058 2059 - S 21 24 0 -
R i 2060 2062 - Mar 20 24 1 -
R i 2060 2062 - S 20 24 0 -
R i 2063 o - Mar 21 24 1 -
R i 2063 o - S 21 24 0 -
R i 2064 2066 - Mar 20 24 1 -
R i 2064 2066 - S 20 24 0 -
R i 2067 o - Mar 21 24 1 -
R i 2067 o - S 21 24 0 -
R i 2068 2070 - Mar 20 24 1 -
R i 2068 2070 - S 20 24 0 -
R i 2071 o - Mar 21 24 1 -
R i 2071 o - S 21 24 0 -
R i 2072 2074 - Mar 20 24 1 -
R i 2072 2074 - S 20 24 0 -
R i 2075 o - Mar 21 24 1 -
R i 2075 o - S 21 24 0 -
R i 2076 2078 - Mar 20 24 1 -
R i 2076 2078 - S 20 24 0 -
R i 2079 o - Mar 21 24 1 -
R i 2079 o - S 21 24 0 -
R i 2080 2082 - Mar 20 24 1 -
R i 2080 2082 - S 20 24 0 -
R i 2083 o - Mar 21 24 1 -
R i 2083 o - S 21 24 0 -
R i 2084 2086 - Mar 20 24 1 -
R i 2084 2086 - S 20 24 0 -
R i 2087 o - Mar 21 24 1 -
R i 2087 o - S 21 24 0 -
R i 2088 ma - Mar 20 24 1 -
R i 2088 ma - S 20 24 0 -
Z Asia/Tehran 3:25:44 - LMT 1916
3:25:44 - TMT 1946
3:30 - +0330 1977 N
4 i +04/+05 1979
3:30 i +0330/+0430
R IQ 1982 o - May 1 0 1 -
R IQ 1982 1984 - O 1 0 0 -
R IQ 1983 o - Mar 31 0 1 -
R IQ 1984 1985 - Ap 1 0 1 -
R IQ 1985 1990 - S lastSu 1s 0 -
R IQ 1986 1990 - Mar lastSu 1s 1 -
R IQ 1991 2007 - Ap 1 3s 1 -
R IQ 1991 2007 - O 1 3s 0 -
Z Asia/Baghdad 2:57:40 - LMT 1890
2:57:36 - BMT 1918
3 - +03 1982 May
3 IQ +03/+04
R Z 1940 o - Jun 1 0 1 D
R Z 1942 1944 - N 1 0 0 S
R Z 1943 o - Ap 1 2 1 D
R Z 1944 o - Ap 1 0 1 D
R Z 1945 o - Ap 16 0 1 D
R Z 1945 o - N 1 2 0 S
R Z 1946 o - Ap 16 2 1 D
R Z 1946 o - N 1 0 0 S
R Z 1948 o - May 23 0 2 DD
R Z 1948 o - S 1 0 1 D
R Z 1948 1949 - N 1 2 0 S
R Z 1949 o - May 1 0 1 D
R Z 1950 o - Ap 16 0 1 D
R Z 1950 o - S 15 3 0 S
R Z 1951 o - Ap 1 0 1 D
R Z 1951 o - N 11 3 0 S
R Z 1952 o - Ap 20 2 1 D
R Z 1952 o - O 19 3 0 S
R Z 1953 o - Ap 12 2 1 D
R Z 1953 o - S 13 3 0 S
R Z 1954 o - Jun 13 0 1 D
R Z 1954 o - S 12 0 0 S
R Z 1955 o - Jun 11 2 1 D
R Z 1955 o - S 11 0 0 S
R Z 1956 o - Jun 3 0 1 D
R Z 1956 o - S 30 3 0 S
R Z 1957 o - Ap 29 2 1 D
R Z 1957 o - S 22 0 0 S
R Z 1974 o - Jul 7 0 1 D
R Z 1974 o - O 13 0 0 S
R Z 1975 o - Ap 20 0 1 D
R Z 1975 o - Au 31 0 0 S
R Z 1980 o - Au 2 0 1 D
R Z 1980 o - S 13 1 0 S
R Z 1984 o - May 5 0 1 D
R Z 1984 o - Au 25 1 0 S
R Z 1985 o - Ap 14 0 1 D
R Z 1985 o - S 15 0 0 S
R Z 1986 o - May 18 0 1 D
R Z 1986 o - S 7 0 0 S
R Z 1987 o - Ap 15 0 1 D
R Z 1987 o - S 13 0 0 S
R Z 1988 o - Ap 10 0 1 D
R Z 1988 o - S 4 0 0 S
R Z 1989 o - Ap 30 0 1 D
R Z 1989 o - S 3 0 0 S
R Z 1990 o - Mar 25 0 1 D
R Z 1990 o - Au 26 0 0 S
R Z 1991 o - Mar 24 0 1 D
R Z 1991 o - S 1 0 0 S
R Z 1992 o - Mar 29 0 1 D
R Z 1992 o - S 6 0 0 S
R Z 1993 o - Ap 2 0 1 D
R Z 1993 o - S 5 0 0 S
R Z 1994 o - Ap 1 0 1 D
R Z 1994 o - Au 28 0 0 S
R Z 1995 o - Mar 31 0 1 D
R Z 1995 o - S 3 0 0 S
R Z 1996 o - Mar 15 0 1 D
R Z 1996 o - S 16 0 0 S
R Z 1997 o - Mar 21 0 1 D
R Z 1997 o - S 14 0 0 S
R Z 1998 o - Mar 20 0 1 D
R Z 1998 o - S 6 0 0 S
R Z 1999 o - Ap 2 2 1 D
R Z 1999 o - S 3 2 0 S
R Z 2000 o - Ap 14 2 1 D
R Z 2000 o - O 6 1 0 S
R Z 2001 o - Ap 9 1 1 D
R Z 2001 o - S 24 1 0 S
R Z 2002 o - Mar 29 1 1 D
R Z 2002 o - O 7 1 0 S
R Z 2003 o - Mar 28 1 1 D
R Z 2003 o - O 3 1 0 S
R Z 2004 o - Ap 7 1 1 D
R Z 2004 o - S 22 1 0 S
R Z 2005 2012 - Ap F<=1 2 1 D
R Z 2005 o - O 9 2 0 S
R Z 2006 o - O 1 2 0 S
R Z 2007 o - S 16 2 0 S
R Z 2008 o - O 5 2 0 S
R Z 2009 o - S 27 2 0 S
R Z 2010 o - S 12 2 0 S
R Z 2011 o - O 2 2 0 S
R Z 2012 o - S 23 2 0 S
R Z 2013 ma - Mar F>=23 2 1 D
R Z 2013 ma - O lastSu 2 0 S
Z Asia/Jerusalem 2:20:54 - LMT 1880
2:20:40 - JMT 1918
2 Z I%sT
R JP 1948 o - May Sa>=1 24 1 D
R JP 1948 1951 - S Sa>=8 25 0 S
R JP 1949 o - Ap Sa>=1 24 1 D
R JP 1950 1951 - May Sa>=1 24 1 D
Z Asia/Tokyo 9:18:59 - LMT 1887 D 31 15u
9 JP J%sT
R J 1973 o - Jun 6 0 1 S
R J 1973 1975 - O 1 0 0 -
R J 1974 1977 - May 1 0 1 S
R J 1976 o - N 1 0 0 -
R J 1977 o - O 1 0 0 -
R J 1978 o - Ap 30 0 1 S
R J 1978 o - S 30 0 0 -
R J 1985 o - Ap 1 0 1 S
R J 1985 o - O 1 0 0 -
R J 1986 1988 - Ap F>=1 0 1 S
R J 1986 1990 - O F>=1 0 0 -
R J 1989 o - May 8 0 1 S
R J 1990 o - Ap 27 0 1 S
R J 1991 o - Ap 17 0 1 S
R J 1991 o - S 27 0 0 -
R J 1992 o - Ap 10 0 1 S
R J 1992 1993 - O F>=1 0 0 -
R J 1993 1998 - Ap F>=1 0 1 S
R J 1994 o - S F>=15 0 0 -
R J 1995 1998 - S F>=15 0s 0 -
R J 1999 o - Jul 1 0s 1 S
R J 1999 2002 - S lastF 0s 0 -
R J 2000 2001 - Mar lastTh 0s 1 S
R J 2002 2012 - Mar lastTh 24 1 S
R J 2003 o - O 24 0s 0 -
R J 2004 o - O 15 0s 0 -
R J 2005 o - S lastF 0s 0 -
R J 2006 2011 - O lastF 0s 0 -
R J 2013 o - D 20 0 0 -
R J 2014 ma - Mar lastTh 24 1 S
R J 2014 ma - O lastF 0s 0 -
Z Asia/Amman 2:23:44 - LMT 1931
2 J EE%sT
Z Asia/Almaty 5:7:48 - LMT 1924 May 2
5 - +05 1930 Jun 21
6 R +06/+07 1991 Mar 31 2s
5 R +05/+06 1992 Ja 19 2s
6 R +06/+07 2004 O 31 2s
6 - +06
Z Asia/Qyzylorda 4:21:52 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 - +05 1981 Ap
5 1 +06 1981 O
6 - +06 1982 Ap
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1991 S 29 2s
5 R +05/+06 1992 Ja 19 2s
6 R +06/+07 1992 Mar 29 2s
5 R +05/+06 2004 O 31 2s
6 - +06 2018 D 21
5 - +05
Z Asia/Qostanay 4:14:28 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 - +05 1981 Ap
5 1 +06 1981 O
6 - +06 1982 Ap
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 2004 O 31 2s
6 - +06
Z Asia/Aqtobe 3:48:40 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 - +05 1981 Ap
5 1 +06 1981 O
6 - +06 1982 Ap
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 2004 O 31 2s
5 - +05
Z Asia/Aqtau 3:21:4 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 - +05 1981 O
6 - +06 1982 Ap
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 1994 S 25 2s
4 R +04/+05 2004 O 31 2s
5 - +05
Z Asia/Atyrau 3:27:44 - LMT 1924 May 2
3 - +03 1930 Jun 21
5 - +05 1981 O
6 - +06 1982 Ap
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 1999 Mar 28 2s
4 R +04/+05 2004 O 31 2s
5 - +05
Z Asia/Oral 3:25:24 - LMT 1924 May 2
3 - +03 1930 Jun 21
5 - +05 1981 Ap
5 1 +06 1981 O
6 - +06 1982 Ap
5 R +05/+06 1989 Mar 26 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 1992 Mar 29 2s
4 R +04/+05 2004 O 31 2s
5 - +05
R KG 1992 1996 - Ap Su>=7 0s 1 -
R KG 1992 1996 - S lastSu 0 0 -
R KG 1997 2005 - Mar lastSu 2:30 1 -
R KG 1997 2004 - O lastSu 2:30 0 -
Z Asia/Bishkek 4:58:24 - LMT 1924 May 2
5 - +05 1930 Jun 21
6 R +06/+07 1991 Mar 31 2s
5 R +05/+06 1991 Au 31 2
5 KG +05/+06 2005 Au 12
6 - +06
R KR 1948 o - Jun 1 0 1 D
R KR 1948 o - S 12 24 0 S
R KR 1949 o - Ap 3 0 1 D
R KR 1949 1951 - S Sa>=7 24 0 S
R KR 1950 o - Ap 1 0 1 D
R KR 1951 o - May 6 0 1 D
R KR 1955 o - May 5 0 1 D
R KR 1955 o - S 8 24 0 S
R KR 1956 o - May 20 0 1 D
R KR 1956 o - S 29 24 0 S
R KR 1957 1960 - May Su>=1 0 1 D
R KR 1957 1960 - S Sa>=17 24 0 S
R KR 1987 1988 - May Su>=8 2 1 D
R KR 1987 1988 - O Su>=8 3 0 S
Z Asia/Seoul 8:27:52 - LMT 1908 Ap
8:30 - KST 1912
9 - JST 1945 S 8
9 KR K%sT 1954 Mar 21
8:30 KR K%sT 1961 Au 10
9 KR K%sT
Z Asia/Pyongyang 8:23 - LMT 1908 Ap
8:30 - KST 1912
9 - JST 1945 Au 24
9 - KST 2015 Au 15
8:30 - KST 2018 May 4 23:30
9 - KST
R l 1920 o - Mar 28 0 1 S
R l 1920 o - O 25 0 0 -
R l 1921 o - Ap 3 0 1 S
R l 1921 o - O 3 0 0 -
R l 1922 o - Mar 26 0 1 S
R l 1922 o - O 8 0 0 -
R l 1923 o - Ap 22 0 1 S
R l 1923 o - S 16 0 0 -
R l 1957 1961 - May 1 0 1 S
R l 1957 1961 - O 1 0 0 -
R l 1972 o - Jun 22 0 1 S
R l 1972 1977 - O 1 0 0 -
R l 1973 1977 - May 1 0 1 S
R l 1978 o - Ap 30 0 1 S
R l 1978 o - S 30 0 0 -
R l 1984 1987 - May 1 0 1 S
R l 1984 1991 - O 16 0 0 -
R l 1988 o - Jun 1 0 1 S
R l 1989 o - May 10 0 1 S
R l 1990 1992 - May 1 0 1 S
R l 1992 o - O 4 0 0 -
R l 1993 ma - Mar lastSu 0 1 S
R l 1993 1998 - S lastSu 0 0 -
R l 1999 ma - O lastSu 0 0 -
Z Asia/Beirut 2:22 - LMT 1880
2 l EE%sT
R NB 1935 1941 - S 14 0 0:20 -
R NB 1935 1941 - D 14 0 0 -
Z Asia/Kuala_Lumpur 6:46:46 - LMT 1901
6:55:25 - SMT 1905 Jun
7 - +07 1933
7 0:20 +0720 1936
7:20 - +0720 1941 S
7:30 - +0730 1942 F 16
9 - +09 1945 S 12
7:30 - +0730 1982
8 - +08
Z Asia/Kuching 7:21:20 - LMT 1926 Mar
7:30 - +0730 1933
8 NB +08/+0820 1942 F 16
9 - +09 1945 S 12
8 - +08
Z Indian/Maldives 4:54 - LMT 1880
4:54 - MMT 1960
5 - +05
R X 1983 1984 - Ap 1 0 1 -
R X 1983 o - O 1 0 0 -
R X 1985 1998 - Mar lastSu 0 1 -
R X 1984 1998 - S lastSu 0 0 -
R X 2001 o - Ap lastSa 2 1 -
R X 2001 2006 - S lastSa 2 0 -
R X 2002 2006 - Mar lastSa 2 1 -
R X 2015 2016 - Mar lastSa 2 1 -
R X 2015 2016 - S lastSa 0 0 -
Z Asia/Hovd 6:6:36 - LMT 1905 Au
6 - +06 1978
7 X +07/+08
Z Asia/Ulaanbaatar 7:7:32 - LMT 1905 Au
7 - +07 1978
8 X +08/+09
Z Asia/Choibalsan 7:38 - LMT 1905 Au
7 - +07 1978
8 - +08 1983 Ap
9 X +09/+10 2008 Mar 31
8 X +08/+09
Z Asia/Kathmandu 5:41:16 - LMT 1920
5:30 - +0530 1986
5:45 - +0545
R PK 2002 o - Ap Su>=2 0 1 S
R PK 2002 o - O Su>=2 0 0 -
R PK 2008 o - Jun 1 0 1 S
R PK 2008 2009 - N 1 0 0 -
R PK 2009 o - Ap 15 0 1 S
Z Asia/Karachi 4:28:12 - LMT 1907
5:30 - +0530 1942 S
5:30 1 +0630 1945 O 15
5:30 - +0530 1951 S 30
5 - +05 1971 Mar 26
5 PK PK%sT
R P 1999 2005 - Ap F>=15 0 1 S
R P 1999 2003 - O F>=15 0 0 -
R P 2004 o - O 1 1 0 -
R P 2005 o - O 4 2 0 -
R P 2006 2007 - Ap 1 0 1 S
R P 2006 o - S 22 0 0 -
R P 2007 o - S Th>=8 2 0 -
R P 2008 2009 - Mar lastF 0 1 S
R P 2008 o - S 1 0 0 -
R P 2009 o - S F>=1 1 0 -
R P 2010 o - Mar 26 0 1 S
R P 2010 o - Au 11 0 0 -
R P 2011 o - Ap 1 0:1 1 S
R P 2011 o - Au 1 0 0 -
R P 2011 o - Au 30 0 1 S
R P 2011 o - S 30 0 0 -
R P 2012 2014 - Mar lastTh 24 1 S
R P 2012 o - S 21 1 0 -
R P 2013 o - S F>=21 0 0 -
R P 2014 2015 - O F>=21 0 0 -
R P 2015 o - Mar lastF 24 1 S
R P 2016 2018 - Mar Sa>=24 1 1 S
R P 2016 ma - O lastSa 1 0 -
R P 2019 ma - Mar lastF 0 1 S
Z Asia/Gaza 2:17:52 - LMT 1900 O
2 Z EET/EEST 1948 May 15
2 K EE%sT 1967 Jun 5
2 Z I%sT 1996
2 J EE%sT 1999
2 P EE%sT 2008 Au 29
2 - EET 2008 S
2 P EE%sT 2010
2 - EET 2010 Mar 27 0:1
2 P EE%sT 2011 Au
2 - EET 2012
2 P EE%sT
Z Asia/Hebron 2:20:23 - LMT 1900 O
2 Z EET/EEST 1948 May 15
2 K EE%sT 1967 Jun 5
2 Z I%sT 1996
2 J EE%sT 1999
2 P EE%sT
R PH 1936 o - N 1 0 1 D
R PH 1937 o - F 1 0 0 S
R PH 1954 o - Ap 12 0 1 D
R PH 1954 o - Jul 1 0 0 S
R PH 1978 o - Mar 22 0 1 D
R PH 1978 o - S 21 0 0 S
Z Asia/Manila -15:56 - LMT 1844 D 31
8:4 - LMT 1899 May 11
8 PH P%sT 1942 May
9 - JST 1944 N
8 PH P%sT
Z Asia/Qatar 3:26:8 - LMT 1920
4 - +04 1972 Jun
3 - +03
L Asia/Qatar Asia/Bahrain
Z Asia/Riyadh 3:6:52 - LMT 1947 Mar 14
3 - +03
L Asia/Riyadh Asia/Aden
L Asia/Riyadh Asia/Kuwait
Z Asia/Singapore 6:55:25 - LMT 1901
6:55:25 - SMT 1905 Jun
7 - +07 1933
7 0:20 +0720 1936
7:20 - +0720 1941 S
7:30 - +0730 1942 F 16
9 - +09 1945 S 12
7:30 - +0730 1982
8 - +08
Z Asia/Colombo 5:19:24 - LMT 1880
5:19:32 - MMT 1906
5:30 - +0530 1942 Ja 5
5:30 0:30 +06 1942 S
5:30 1 +0630 1945 O 16 2
5:30 - +0530 1996 May 25
6:30 - +0630 1996 O 26 0:30
6 - +06 2006 Ap 15 0:30
5:30 - +0530
R S 1920 1923 - Ap Su>=15 2 1 S
R S 1920 1923 - O Su>=1 2 0 -
R S 1962 o - Ap 29 2 1 S
R S 1962 o - O 1 2 0 -
R S 1963 1965 - May 1 2 1 S
R S 1963 o - S 30 2 0 -
R S 1964 o - O 1 2 0 -
R S 1965 o - S 30 2 0 -
R S 1966 o - Ap 24 2 1 S
R S 1966 1976 - O 1 2 0 -
R S 1967 1978 - May 1 2 1 S
R S 1977 1978 - S 1 2 0 -
R S 1983 1984 - Ap 9 2 1 S
R S 1983 1984 - O 1 2 0 -
R S 1986 o - F 16 2 1 S
R S 1986 o - O 9 2 0 -
R S 1987 o - Mar 1 2 1 S
R S 1987 1988 - O 31 2 0 -
R S 1988 o - Mar 15 2 1 S
R S 1989 o - Mar 31 2 1 S
R S 1989 o - O 1 2 0 -
R S 1990 o - Ap 1 2 1 S
R S 1990 o - S 30 2 0 -
R S 1991 o - Ap 1 0 1 S
R S 1991 1992 - O 1 0 0 -
R S 1992 o - Ap 8 0 1 S
R S 1993 o - Mar 26 0 1 S
R S 1993 o - S 25 0 0 -
R S 1994 1996 - Ap 1 0 1 S
R S 1994 2005 - O 1 0 0 -
R S 1997 1998 - Mar lastM 0 1 S
R S 1999 2006 - Ap 1 0 1 S
R S 2006 o - S 22 0 0 -
R S 2007 o - Mar lastF 0 1 S
R S 2007 o - N F>=1 0 0 -
R S 2008 o - Ap F>=1 0 1 S
R S 2008 o - N 1 0 0 -
R S 2009 o - Mar lastF 0 1 S
R S 2010 2011 - Ap F>=1 0 1 S
R S 2012 ma - Mar lastF 0 1 S
R S 2009 ma - O lastF 0 0 -
Z Asia/Damascus 2:25:12 - LMT 1920
2 S EE%sT
Z Asia/Dushanbe 4:35:12 - LMT 1924 May 2
5 - +05 1930 Jun 21
6 R +06/+07 1991 Mar 31 2s
5 1 +05/+06 1991 S 9 2s
5 - +05
Z Asia/Bangkok 6:42:4 - LMT 1880
6:42:4 - BMT 1920 Ap
7 - +07
L Asia/Bangkok Asia/Phnom_Penh
L Asia/Bangkok Asia/Vientiane
Z Asia/Ashgabat 3:53:32 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 R +05/+06 1991 Mar 31 2
4 R +04/+05 1992 Ja 19 2
5 - +05
Z Asia/Dubai 3:41:12 - LMT 1920
4 - +04
L Asia/Dubai Asia/Muscat
Z Asia/Samarkand 4:27:53 - LMT 1924 May 2
4 - +04 1930 Jun 21
5 - +05 1981 Ap
5 1 +06 1981 O
6 - +06 1982 Ap
5 R +05/+06 1992
5 - +05
Z Asia/Tashkent 4:37:11 - LMT 1924 May 2
5 - +05 1930 Jun 21
6 R +06/+07 1991 Mar 31 2
5 R +05/+06 1992
5 - +05
Z Asia/Ho_Chi_Minh 7:6:40 - LMT 1906 Jul
7:6:30 - PLMT 1911 May
7 - +07 1942 D 31 23
8 - +08 1945 Mar 14 23
9 - +09 1945 S 2
7 - +07 1947 Ap
8 - +08 1955 Jul
7 - +07 1959 D 31 23
8 - +08 1975 Jun 13
7 - +07
R AU 1917 o - Ja 1 0:1 1 D
R AU 1917 o - Mar 25 2 0 S
R AU 1942 o - Ja 1 2 1 D
R AU 1942 o - Mar 29 2 0 S
R AU 1942 o - S 27 2 1 D
R AU 1943 1944 - Mar lastSu 2 0 S
R AU 1943 o - O 3 2 1 D
Z Australia/Darwin 8:43:20 - LMT 1895 F
9 - ACST 1899 May
9:30 AU AC%sT
R AW 1974 o - O lastSu 2s 1 D
R AW 1975 o - Mar Su>=1 2s 0 S
R AW 1983 o - O lastSu 2s 1 D
R AW 1984 o - Mar Su>=1 2s 0 S
R AW 1991 o - N 17 2s 1 D
R AW 1992 o - Mar Su>=1 2s 0 S
R AW 2006 o - D 3 2s 1 D
R AW 2007 2009 - Mar lastSu 2s 0 S
R AW 2007 2008 - O lastSu 2s 1 D
Z Australia/Perth 7:43:24 - LMT 1895 D
8 AU AW%sT 1943 Jul
8 AW AW%sT
Z Australia/Eucla 8:35:28 - LMT 1895 D
8:45 AU +0845/+0945 1943 Jul
8:45 AW +0845/+0945
R AQ 1971 o - O lastSu 2s 1 D
R AQ 1972 o - F lastSu 2s 0 S
R AQ 1989 1991 - O lastSu 2s 1 D
R AQ 1990 1992 - Mar Su>=1 2s 0 S
R Ho 1992 1993 - O lastSu 2s 1 D
R Ho 1993 1994 - Mar Su>=1 2s 0 S
Z Australia/Brisbane 10:12:8 - LMT 1895
10 AU AE%sT 1971
10 AQ AE%sT
Z Australia/Lindeman 9:55:56 - LMT 1895
10 AU AE%sT 1971
10 AQ AE%sT 1992 Jul
10 Ho AE%sT
R AS 1971 1985 - O lastSu 2s 1 D
R AS 1986 o - O 19 2s 1 D
R AS 1987 2007 - O lastSu 2s 1 D
R AS 1972 o - F 27 2s 0 S
R AS 1973 1985 - Mar Su>=1 2s 0 S
R AS 1986 1990 - Mar Su>=15 2s 0 S
R AS 1991 o - Mar 3 2s 0 S
R AS 1992 o - Mar 22 2s 0 S
R AS 1993 o - Mar 7 2s 0 S
R AS 1994 o - Mar 20 2s 0 S
R AS 1995 2005 - Mar lastSu 2s 0 S
R AS 2006 o - Ap 2 2s 0 S
R AS 2007 o - Mar lastSu 2s 0 S
R AS 2008 ma - Ap Su>=1 2s 0 S
R AS 2008 ma - O Su>=1 2s 1 D
Z Australia/Adelaide 9:14:20 - LMT 1895 F
9 - ACST 1899 May
9:30 AU AC%sT 1971
9:30 AS AC%sT
R AT 1967 o - O Su>=1 2s 1 D
R AT 1968 o - Mar lastSu 2s 0 S
R AT 1968 1985 - O lastSu 2s 1 D
R AT 1969 1971 - Mar Su>=8 2s 0 S
R AT 1972 o - F lastSu 2s 0 S
R AT 1973 1981 - Mar Su>=1 2s 0 S
R AT 1982 1983 - Mar lastSu 2s 0 S
R AT 1984 1986 - Mar Su>=1 2s 0 S
R AT 1986 o - O Su>=15 2s 1 D
R AT 1987 1990 - Mar Su>=15 2s 0 S
R AT 1987 o - O Su>=22 2s 1 D
R AT 1988 1990 - O lastSu 2s 1 D
R AT 1991 1999 - O Su>=1 2s 1 D
R AT 1991 2005 - Mar lastSu 2s 0 S
R AT 2000 o - Au lastSu 2s 1 D
R AT 2001 ma - O Su>=1 2s 1 D
R AT 2006 o - Ap Su>=1 2s 0 S
R AT 2007 o - Mar lastSu 2s 0 S
R AT 2008 ma - Ap Su>=1 2s 0 S
Z Australia/Hobart 9:49:16 - LMT 1895 S
10 - AEST 1916 O 1 2
10 1 AEDT 1917 F
10 AU AE%sT 1967
10 AT AE%sT
Z Australia/Currie 9:35:28 - LMT 1895 S
10 - AEST 1916 O 1 2
10 1 AEDT 1917 F
10 AU AE%sT 1971 Jul
10 AT AE%sT
R AV 1971 1985 - O lastSu 2s 1 D
R AV 1972 o - F lastSu 2s 0 S
R AV 1973 1985 - Mar Su>=1 2s 0 S
R AV 1986 1990 - Mar Su>=15 2s 0 S
R AV 1986 1987 - O Su>=15 2s 1 D
R AV 1988 1999 - O lastSu 2s 1 D
R AV 1991 1994 - Mar Su>=1 2s 0 S
R AV 1995 2005 - Mar lastSu 2s 0 S
R AV 2000 o - Au lastSu 2s 1 D
R AV 2001 2007 - O lastSu 2s 1 D
R AV 2006 o - Ap Su>=1 2s 0 S
R AV 2007 o - Mar lastSu 2s 0 S
R AV 2008 ma - Ap Su>=1 2s 0 S
R AV 2008 ma - O Su>=1 2s 1 D
Z Australia/Melbourne 9:39:52 - LMT 1895 F
10 AU AE%sT 1971
10 AV AE%sT
R AN 1971 1985 - O lastSu 2s 1 D
R AN 1972 o - F 27 2s 0 S
R AN 1973 1981 - Mar Su>=1 2s 0 S
R AN 1982 o - Ap Su>=1 2s 0 S
R AN 1983 1985 - Mar Su>=1 2s 0 S
R AN 1986 1989 - Mar Su>=15 2s 0 S
R AN 1986 o - O 19 2s 1 D
R AN 1987 1999 - O lastSu 2s 1 D
R AN 1990 1995 - Mar Su>=1 2s 0 S
R AN 1996 2005 - Mar lastSu 2s 0 S
R AN 2000 o - Au lastSu 2s 1 D
R AN 2001 2007 - O lastSu 2s 1 D
R AN 2006 o - Ap Su>=1 2s 0 S
R AN 2007 o - Mar lastSu 2s 0 S
R AN 2008 ma - Ap Su>=1 2s 0 S
R AN 2008 ma - O Su>=1 2s 1 D
Z Australia/Sydney 10:4:52 - LMT 1895 F
10 AU AE%sT 1971
10 AN AE%sT
Z Australia/Broken_Hill 9:25:48 - LMT 1895 F
10 - AEST 1896 Au 23
9 - ACST 1899 May
9:30 AU AC%sT 1971
9:30 AN AC%sT 2000
9:30 AS AC%sT
R LH 1981 1984 - O lastSu 2 1 -
R LH 1982 1985 - Mar Su>=1 2 0 -
R LH 1985 o - O lastSu 2 0:30 -
R LH 1986 1989 - Mar Su>=15 2 0 -
R LH 1986 o - O 19 2 0:30 -
R LH 1987 1999 - O lastSu 2 0:30 -
R LH 1990 1995 - Mar Su>=1 2 0 -
R LH 1996 2005 - Mar lastSu 2 0 -
R LH 2000 o - Au lastSu 2 0:30 -
R LH 2001 2007 - O lastSu 2 0:30 -
R LH 2006 o - Ap Su>=1 2 0 -
R LH 2007 o - Mar lastSu 2 0 -
R LH 2008 ma - Ap Su>=1 2 0 -
R LH 2008 ma - O Su>=1 2 0:30 -
Z Australia/Lord_Howe 10:36:20 - LMT 1895 F
10 - AEST 1981 Mar
10:30 LH +1030/+1130 1985 Jul
10:30 LH +1030/+11
Z Antarctica/Macquarie 0 - -00 1899 N
10 - AEST 1916 O 1 2
10 1 AEDT 1917 F
10 AU AE%sT 1919 Ap 1 0s
0 - -00 1948 Mar 25
10 AU AE%sT 1967
10 AT AE%sT 2010 Ap 4 3
11 - +11
Z Indian/Christmas 7:2:52 - LMT 1895 F
7 - +07
Z Indian/Cocos 6:27:40 - LMT 1900
6:30 - +0630
R FJ 1998 1999 - N Su>=1 2 1 -
R FJ 1999 2000 - F lastSu 3 0 -
R FJ 2009 o - N 29 2 1 -
R FJ 2010 o - Mar lastSu 3 0 -
R FJ 2010 2013 - O Su>=21 2 1 -
R FJ 2011 o - Mar Su>=1 3 0 -
R FJ 2012 2013 - Ja Su>=18 3 0 -
R FJ 2014 o - Ja Su>=18 2 0 -
R FJ 2014 2018 - N Su>=1 2 1 -
R FJ 2015 ma - Ja Su>=12 3 0 -
R FJ 2019 ma - N Su>=8 2 1 -
Z Pacific/Fiji 11:55:44 - LMT 1915 O 26
12 FJ +12/+13
Z Pacific/Gambier -8:59:48 - LMT 1912 O
-9 - -09
Z Pacific/Marquesas -9:18 - LMT 1912 O
-9:30 - -0930
Z Pacific/Tahiti -9:58:16 - LMT 1912 O
-10 - -10
R Gu 1959 o - Jun 27 2 1 D
R Gu 1961 o - Ja 29 2 0 S
R Gu 1967 o - S 1 2 1 D
R Gu 1969 o - Ja 26 0:1 0 S
R Gu 1969 o - Jun 22 2 1 D
R Gu 1969 o - Au 31 2 0 S
R Gu 1970 1971 - Ap lastSu 2 1 D
R Gu 1970 1971 - S Su>=1 2 0 S
R Gu 1973 o - D 16 2 1 D
R Gu 1974 o - F 24 2 0 S
R Gu 1976 o - May 26 2 1 D
R Gu 1976 o - Au 22 2:1 0 S
R Gu 1977 o - Ap 24 2 1 D
R Gu 1977 o - Au 28 2 0 S
Z Pacific/Guam -14:21 - LMT 1844 D 31
9:39 - LMT 1901
10 - GST 1941 D 10
9 - +09 1944 Jul 31
10 Gu G%sT 2000 D 23
10 - ChST
L Pacific/Guam Pacific/Saipan
Z Pacific/Tarawa 11:32:4 - LMT 1901
12 - +12
Z Pacific/Enderbury -11:24:20 - LMT 1901
-12 - -12 1979 O
-11 - -11 1994 D 31
13 - +13
Z Pacific/Kiritimati -10:29:20 - LMT 1901
-10:40 - -1040 1979 O
-10 - -10 1994 D 31
14 - +14
Z Pacific/Majuro 11:24:48 - LMT 1901
11 - +11 1914 O
9 - +09 1919 F
11 - +11 1937
10 - +10 1941 Ap
9 - +09 1944 Ja 30
11 - +11 1969 O
12 - +12
Z Pacific/Kwajalein 11:9:20 - LMT 1901
11 - +11 1937
10 - +10 1941 Ap
9 - +09 1944 F 6
11 - +11 1969 O
-12 - -12 1993 Au 20 24
12 - +12
Z Pacific/Chuuk -13:52:52 - LMT 1844 D 31
10:7:8 - LMT 1901
10 - +10 1914 O
9 - +09 1919 F
10 - +10 1941 Ap
9 - +09 1945 Au
10 - +10
Z Pacific/Pohnpei -13:27:8 - LMT 1844 D 31
10:32:52 - LMT 1901
11 - +11 1914 O
9 - +09 1919 F
11 - +11 1937
10 - +10 1941 Ap
9 - +09 1945 Au
11 - +11
Z Pacific/Kosrae -13:8:4 - LMT 1844 D 31
10:51:56 - LMT 1901
11 - +11 1914 O
9 - +09 1919 F
11 - +11 1937
10 - +10 1941 Ap
9 - +09 1945 Au
11 - +11 1969 O
12 - +12 1999
11 - +11
Z Pacific/Nauru 11:7:40 - LMT 1921 Ja 15
11:30 - +1130 1942 Au 29
9 - +09 1945 S 8
11:30 - +1130 1979 F 10 2
12 - +12
R NC 1977 1978 - D Su>=1 0 1 -
R NC 1978 1979 - F 27 0 0 -
R NC 1996 o - D 1 2s 1 -
R NC 1997 o - Mar 2 2s 0 -
Z Pacific/Noumea 11:5:48 - LMT 1912 Ja 13
11 NC +11/+12
R NZ 1927 o - N 6 2 1 S
R NZ 1928 o - Mar 4 2 0 M
R NZ 1928 1933 - O Su>=8 2 0:30 S
R NZ 1929 1933 - Mar Su>=15 2 0 M
R NZ 1934 1940 - Ap lastSu 2 0 M
R NZ 1934 1940 - S lastSu 2 0:30 S
R NZ 1946 o - Ja 1 0 0 S
R NZ 1974 o - N Su>=1 2s 1 D
R k 1974 o - N Su>=1 2:45s 1 -
R NZ 1975 o - F lastSu 2s 0 S
R k 1975 o - F lastSu 2:45s 0 -
R NZ 1975 1988 - O lastSu 2s 1 D
R k 1975 1988 - O lastSu 2:45s 1 -
R NZ 1976 1989 - Mar Su>=1 2s 0 S
R k 1976 1989 - Mar Su>=1 2:45s 0 -
R NZ 1989 o - O Su>=8 2s 1 D
R k 1989 o - O Su>=8 2:45s 1 -
R NZ 1990 2006 - O Su>=1 2s 1 D
R k 1990 2006 - O Su>=1 2:45s 1 -
R NZ 1990 2007 - Mar Su>=15 2s 0 S
R k 1990 2007 - Mar Su>=15 2:45s 0 -
R NZ 2007 ma - S lastSu 2s 1 D
R k 2007 ma - S lastSu 2:45s 1 -
R NZ 2008 ma - Ap Su>=1 2s 0 S
R k 2008 ma - Ap Su>=1 2:45s 0 -
Z Pacific/Auckland 11:39:4 - LMT 1868 N 2
11:30 NZ NZ%sT 1946
12 NZ NZ%sT
Z Pacific/Chatham 12:13:48 - LMT 1868 N 2
12:15 - +1215 1946
12:45 k +1245/+1345
L Pacific/Auckland Antarctica/McMurdo
R CK 1978 o - N 12 0 0:30 -
R CK 1979 1991 - Mar Su>=1 0 0 -
R CK 1979 1990 - O lastSu 0 0:30 -
Z Pacific/Rarotonga -10:39:4 - LMT 1901
-10:30 - -1030 1978 N 12
-10 CK -10/-0930
Z Pacific/Niue -11:19:40 - LMT 1901
-11:20 - -1120 1951
-11:30 - -1130 1978 O
-11 - -11
Z Pacific/Norfolk 11:11:52 - LMT 1901
11:12 - +1112 1951
11:30 - +1130 1974 O 27 2s
11:30 1 +1230 1975 Mar 2 2s
11:30 - +1130 2015 O 4 2s
11 - +11 2019 Jul
11 AN +11/+12
Z Pacific/Palau -15:2:4 - LMT 1844 D 31
8:57:56 - LMT 1901
9 - +09
Z Pacific/Port_Moresby 9:48:40 - LMT 1880
9:48:32 - PMMT 1895
10 - +10
Z Pacific/Bougainville 10:22:16 - LMT 1880
9:48:32 - PMMT 1895
10 - +10 1942 Jul
9 - +09 1945 Au 21
10 - +10 2014 D 28 2
11 - +11
Z Pacific/Pitcairn -8:40:20 - LMT 1901
-8:30 - -0830 1998 Ap 27
-8 - -08
Z Pacific/Pago_Pago 12:37:12 - LMT 1892 Jul 5
-11:22:48 - LMT 1911
-11 - SST
L Pacific/Pago_Pago Pacific/Midway
R WS 2010 o - S lastSu 0 1 -
R WS 2011 o - Ap Sa>=1 4 0 -
R WS 2011 o - S lastSa 3 1 -
R WS 2012 ma - Ap Su>=1 4 0 -
R WS 2012 ma - S lastSu 3 1 -
Z Pacific/Apia 12:33:4 - LMT 1892 Jul 5
-11:26:56 - LMT 1911
-11:30 - -1130 1950
-11 WS -11/-10 2011 D 29 24
13 WS +13/+14
Z Pacific/Guadalcanal 10:39:48 - LMT 1912 O
11 - +11
Z Pacific/Fakaofo -11:24:56 - LMT 1901
-11 - -11 2011 D 30
13 - +13
R TO 1999 o - O 7 2s 1 -
R TO 2000 o - Mar 19 2s 0 -
R TO 2000 2001 - N Su>=1 2 1 -
R TO 2001 2002 - Ja lastSu 2 0 -
R TO 2016 o - N Su>=1 2 1 -
R TO 2017 o - Ja Su>=15 3 0 -
Z Pacific/Tongatapu 12:19:20 - LMT 1901
12:20 - +1220 1941
13 - +13 1999
13 TO +13/+14
Z Pacific/Funafuti 11:56:52 - LMT 1901
12 - +12
Z Pacific/Wake 11:6:28 - LMT 1901
12 - +12
R VU 1983 o - S 25 0 1 -
R VU 1984 1991 - Mar Su>=23 0 0 -
R VU 1984 o - O 23 0 1 -
R VU 1985 1991 - S Su>=23 0 1 -
R VU 1992 1993 - Ja Su>=23 0 0 -
R VU 1992 o - O Su>=23 0 1 -
Z Pacific/Efate 11:13:16 - LMT 1912 Ja 13
11 VU +11/+12
Z Pacific/Wallis 12:15:20 - LMT 1901
12 - +12
R G 1916 o - May 21 2s 1 BST
R G 1916 o - O 1 2s 0 GMT
R G 1917 o - Ap 8 2s 1 BST
R G 1917 o - S 17 2s 0 GMT
R G 1918 o - Mar 24 2s 1 BST
R G 1918 o - S 30 2s 0 GMT
R G 1919 o - Mar 30 2s 1 BST
R G 1919 o - S 29 2s 0 GMT
R G 1920 o - Mar 28 2s 1 BST
R G 1920 o - O 25 2s 0 GMT
R G 1921 o - Ap 3 2s 1 BST
R G 1921 o - O 3 2s 0 GMT
R G 1922 o - Mar 26 2s 1 BST
R G 1922 o - O 8 2s 0 GMT
R G 1923 o - Ap Su>=16 2s 1 BST
R G 1923 1924 - S Su>=16 2s 0 GMT
R G 1924 o - Ap Su>=9 2s 1 BST
R G 1925 1926 - Ap Su>=16 2s 1 BST
R G 1925 1938 - O Su>=2 2s 0 GMT
R G 1927 o - Ap Su>=9 2s 1 BST
R G 1928 1929 - Ap Su>=16 2s 1 BST
R G 1930 o - Ap Su>=9 2s 1 BST
R G 1931 1932 - Ap Su>=16 2s 1 BST
R G 1933 o - Ap Su>=9 2s 1 BST
R G 1934 o - Ap Su>=16 2s 1 BST
R G 1935 o - Ap Su>=9 2s 1 BST
R G 1936 1937 - Ap Su>=16 2s 1 BST
R G 1938 o - Ap Su>=9 2s 1 BST
R G 1939 o - Ap Su>=16 2s 1 BST
R G 1939 o - N Su>=16 2s 0 GMT
R G 1940 o - F Su>=23 2s 1 BST
R G 1941 o - May Su>=2 1s 2 BDST
R G 1941 1943 - Au Su>=9 1s 1 BST
R G 1942 1944 - Ap Su>=2 1s 2 BDST
R G 1944 o - S Su>=16 1s 1 BST
R G 1945 o - Ap M>=2 1s 2 BDST
R G 1945 o - Jul Su>=9 1s 1 BST
R G 1945 1946 - O Su>=2 2s 0 GMT
R G 1946 o - Ap Su>=9 2s 1 BST
R G 1947 o - Mar 16 2s 1 BST
R G 1947 o - Ap 13 1s 2 BDST
R G 1947 o - Au 10 1s 1 BST
R G 1947 o - N 2 2s 0 GMT
R G 1948 o - Mar 14 2s 1 BST
R G 1948 o - O 31 2s 0 GMT
R G 1949 o - Ap 3 2s 1 BST
R G 1949 o - O 30 2s 0 GMT
R G 1950 1952 - Ap Su>=14 2s 1 BST
R G 1950 1952 - O Su>=21 2s 0 GMT
R G 1953 o - Ap Su>=16 2s 1 BST
R G 1953 1960 - O Su>=2 2s 0 GMT
R G 1954 o - Ap Su>=9 2s 1 BST
R G 1955 1956 - Ap Su>=16 2s 1 BST
R G 1957 o - Ap Su>=9 2s 1 BST
R G 1958 1959 - Ap Su>=16 2s 1 BST
R G 1960 o - Ap Su>=9 2s 1 BST
R G 1961 1963 - Mar lastSu 2s 1 BST
R G 1961 1968 - O Su>=23 2s 0 GMT
R G 1964 1967 - Mar Su>=19 2s 1 BST
R G 1968 o - F 18 2s 1 BST
R G 1972 1980 - Mar Su>=16 2s 1 BST
R G 1972 1980 - O Su>=23 2s 0 GMT
R G 1981 1995 - Mar lastSu 1u 1 BST
R G 1981 1989 - O Su>=23 1u 0 GMT
R G 1990 1995 - O Su>=22 1u 0 GMT
Z Europe/London -0:1:15 - LMT 1847 D 1 0s
0 G %s 1968 O 27
1 - BST 1971 O 31 2u
0 G %s 1996
0 E GMT/BST
L Europe/London Europe/Jersey
L Europe/London Europe/Guernsey
L Europe/London Europe/Isle_of_Man
R IE 1971 o - O 31 2u -1 -
R IE 1972 1980 - Mar Su>=16 2u 0 -
R IE 1972 1980 - O Su>=23 2u -1 -
R IE 1981 ma - Mar lastSu 1u 0 -
R IE 1981 1989 - O Su>=23 1u -1 -
R IE 1990 1995 - O Su>=22 1u -1 -
R IE 1996 ma - O lastSu 1u -1 -
Z Europe/Dublin -0:25 - LMT 1880 Au 2
-0:25:21 - DMT 1916 May 21 2s
-0:25:21 1 IST 1916 O 1 2s
0 G %s 1921 D 6
0 G GMT/IST 1940 F 25 2s
0 1 IST 1946 O 6 2s
0 - GMT 1947 Mar 16 2s
0 1 IST 1947 N 2 2s
0 - GMT 1948 Ap 18 2s
0 G GMT/IST 1968 O 27
1 IE IST/GMT
R E 1977 1980 - Ap Su>=1 1u 1 S
R E 1977 o - S lastSu 1u 0 -
R E 1978 o - O 1 1u 0 -
R E 1979 1995 - S lastSu 1u 0 -
R E 1981 ma - Mar lastSu 1u 1 S
R E 1996 ma - O lastSu 1u 0 -
R W- 1977 1980 - Ap Su>=1 1s 1 S
R W- 1977 o - S lastSu 1s 0 -
R W- 1978 o - O 1 1s 0 -
R W- 1979 1995 - S lastSu 1s 0 -
R W- 1981 ma - Mar lastSu 1s 1 S
R W- 1996 ma - O lastSu 1s 0 -
R c 1916 o - Ap 30 23 1 S
R c 1916 o - O 1 1 0 -
R c 1917 1918 - Ap M>=15 2s 1 S
R c 1917 1918 - S M>=15 2s 0 -
R c 1940 o - Ap 1 2s 1 S
R c 1942 o - N 2 2s 0 -
R c 1943 o - Mar 29 2s 1 S
R c 1943 o - O 4 2s 0 -
R c 1944 1945 - Ap M>=1 2s 1 S
R c 1944 o - O 2 2s 0 -
R c 1945 o - S 16 2s 0 -
R c 1977 1980 - Ap Su>=1 2s 1 S
R c 1977 o - S lastSu 2s 0 -
R c 1978 o - O 1 2s 0 -
R c 1979 1995 - S lastSu 2s 0 -
R c 1981 ma - Mar lastSu 2s 1 S
R c 1996 ma - O lastSu 2s 0 -
R e 1977 1980 - Ap Su>=1 0 1 S
R e 1977 o - S lastSu 0 0 -
R e 1978 o - O 1 0 0 -
R e 1979 1995 - S lastSu 0 0 -
R e 1981 ma - Mar lastSu 0 1 S
R e 1996 ma - O lastSu 0 0 -
R R 1917 o - Jul 1 23 1 MST
R R 1917 o - D 28 0 0 MMT
R R 1918 o - May 31 22 2 MDST
R R 1918 o - S 16 1 1 MST
R R 1919 o - May 31 23 2 MDST
R R 1919 o - Jul 1 0u 1 MSD
R R 1919 o - Au 16 0 0 MSK
R R 1921 o - F 14 23 1 MSD
R R 1921 o - Mar 20 23 2 +05
R R 1921 o - S 1 0 1 MSD
R R 1921 o - O 1 0 0 -
R R 1981 1984 - Ap 1 0 1 S
R R 1981 1983 - O 1 0 0 -
R R 1984 1995 - S lastSu 2s 0 -
R R 1985 2010 - Mar lastSu 2s 1 S
R R 1996 2010 - O lastSu 2s 0 -
Z WET 0 E WE%sT
Z CET 1 c CE%sT
Z MET 1 c ME%sT
Z EET 2 E EE%sT
R q 1940 o - Jun 16 0 1 S
R q 1942 o - N 2 3 0 -
R q 1943 o - Mar 29 2 1 S
R q 1943 o - Ap 10 3 0 -
R q 1974 o - May 4 0 1 S
R q 1974 o - O 2 0 0 -
R q 1975 o - May 1 0 1 S
R q 1975 o - O 2 0 0 -
R q 1976 o - May 2 0 1 S
R q 1976 o - O 3 0 0 -
R q 1977 o - May 8 0 1 S
R q 1977 o - O 2 0 0 -
R q 1978 o - May 6 0 1 S
R q 1978 o - O 1 0 0 -
R q 1979 o - May 5 0 1 S
R q 1979 o - S 30 0 0 -
R q 1980 o - May 3 0 1 S
R q 1980 o - O 4 0 0 -
R q 1981 o - Ap 26 0 1 S
R q 1981 o - S 27 0 0 -
R q 1982 o - May 2 0 1 S
R q 1982 o - O 3 0 0 -
R q 1983 o - Ap 18 0 1 S
R q 1983 o - O 1 0 0 -
R q 1984 o - Ap 1 0 1 S
Z Europe/Tirane 1:19:20 - LMT 1914
1 - CET 1940 Jun 16
1 q CE%sT 1984 Jul
1 E CE%sT
Z Europe/Andorra 0:6:4 - LMT 1901
0 - WET 1946 S 30
1 - CET 1985 Mar 31 2
1 E CE%sT
R a 1920 o - Ap 5 2s 1 S
R a 1920 o - S 13 2s 0 -
R a 1946 o - Ap 14 2s 1 S
R a 1946 o - O 7 2s 0 -
R a 1947 1948 - O Su>=1 2s 0 -
R a 1947 o - Ap 6 2s 1 S
R a 1948 o - Ap 18 2s 1 S
R a 1980 o - Ap 6 0 1 S
R a 1980 o - S 28 0 0 -
Z Europe/Vienna 1:5:21 - LMT 1893 Ap
1 c CE%sT 1920
1 a CE%sT 1940 Ap 1 2s
1 c CE%sT 1945 Ap 2 2s
1 1 CEST 1945 Ap 12 2s
1 - CET 1946
1 a CE%sT 1981
1 E CE%sT
Z Europe/Minsk 1:50:16 - LMT 1880
1:50 - MMT 1924 May 2
2 - EET 1930 Jun 21
3 - MSK 1941 Jun 28
1 c CE%sT 1944 Jul 3
3 R MSK/MSD 1990
3 - MSK 1991 Mar 31 2s
2 R EE%sT 2011 Mar 27 2s
3 - +03
R b 1918 o - Mar 9 0s 1 S
R b 1918 1919 - O Sa>=1 23s 0 -
R b 1919 o - Mar 1 23s 1 S
R b 1920 o - F 14 23s 1 S
R b 1920 o - O 23 23s 0 -
R b 1921 o - Mar 14 23s 1 S
R b 1921 o - O 25 23s 0 -
R b 1922 o - Mar 25 23s 1 S
R b 1922 1927 - O Sa>=1 23s 0 -
R b 1923 o - Ap 21 23s 1 S
R b 1924 o - Mar 29 23s 1 S
R b 1925 o - Ap 4 23s 1 S
R b 1926 o - Ap 17 23s 1 S
R b 1927 o - Ap 9 23s 1 S
R b 1928 o - Ap 14 23s 1 S
R b 1928 1938 - O Su>=2 2s 0 -
R b 1929 o - Ap 21 2s 1 S
R b 1930 o - Ap 13 2s 1 S
R b 1931 o - Ap 19 2s 1 S
R b 1932 o - Ap 3 2s 1 S
R b 1933 o - Mar 26 2s 1 S
R b 1934 o - Ap 8 2s 1 S
R b 1935 o - Mar 31 2s 1 S
R b 1936 o - Ap 19 2s 1 S
R b 1937 o - Ap 4 2s 1 S
R b 1938 o - Mar 27 2s 1 S
R b 1939 o - Ap 16 2s 1 S
R b 1939 o - N 19 2s 0 -
R b 1940 o - F 25 2s 1 S
R b 1944 o - S 17 2s 0 -
R b 1945 o - Ap 2 2s 1 S
R b 1945 o - S 16 2s 0 -
R b 1946 o - May 19 2s 1 S
R b 1946 o - O 7 2s 0 -
Z Europe/Brussels 0:17:30 - LMT 1880
0:17:30 - BMT 1892 May 1 0:17:30
0 - WET 1914 N 8
1 - CET 1916 May
1 c CE%sT 1918 N 11 11u
0 b WE%sT 1940 May 20 2s
1 c CE%sT 1944 S 3
1 b CE%sT 1977
1 E CE%sT
R BG 1979 o - Mar 31 23 1 S
R BG 1979 o - O 1 1 0 -
R BG 1980 1982 - Ap Sa>=1 23 1 S
R BG 1980 o - S 29 1 0 -
R BG 1981 o - S 27 2 0 -
Z Europe/Sofia 1:33:16 - LMT 1880
1:56:56 - IMT 1894 N 30
2 - EET 1942 N 2 3
1 c CE%sT 1945
1 - CET 1945 Ap 2 3
2 - EET 1979 Mar 31 23
2 BG EE%sT 1982 S 26 3
2 c EE%sT 1991
2 e EE%sT 1997
2 E EE%sT
R CZ 1945 o - Ap M>=1 2s 1 S
R CZ 1945 o - O 1 2s 0 -
R CZ 1946 o - May 6 2s 1 S
R CZ 1946 1949 - O Su>=1 2s 0 -
R CZ 1947 1948 - Ap Su>=15 2s 1 S
R CZ 1949 o - Ap 9 2s 1 S
Z Europe/Prague 0:57:44 - LMT 1850
0:57:44 - PMT 1891 O
1 c CE%sT 1945 May 9
1 CZ CE%sT 1946 D 1 3
1 -1 GMT 1947 F 23 2
1 CZ CE%sT 1979
1 E CE%sT
R D 1916 o - May 14 23 1 S
R D 1916 o - S 30 23 0 -
R D 1940 o - May 15 0 1 S
R D 1945 o - Ap 2 2s 1 S
R D 1945 o - Au 15 2s 0 -
R D 1946 o - May 1 2s 1 S
R D 1946 o - S 1 2s 0 -
R D 1947 o - May 4 2s 1 S
R D 1947 o - Au 10 2s 0 -
R D 1948 o - May 9 2s 1 S
R D 1948 o - Au 8 2s 0 -
Z Europe/Copenhagen 0:50:20 - LMT 1890
0:50:20 - CMT 1894
1 D CE%sT 1942 N 2 2s
1 c CE%sT 1945 Ap 2 2
1 D CE%sT 1980
1 E CE%sT
Z Atlantic/Faroe -0:27:4 - LMT 1908 Ja 11
0 - WET 1981
0 E WE%sT
R Th 1991 1992 - Mar lastSu 2 1 D
R Th 1991 1992 - S lastSu 2 0 S
R Th 1993 2006 - Ap Su>=1 2 1 D
R Th 1993 2006 - O lastSu 2 0 S
R Th 2007 ma - Mar Su>=8 2 1 D
R Th 2007 ma - N Su>=1 2 0 S
Z America/Danmarkshavn -1:14:40 - LMT 1916 Jul 28
-3 - -03 1980 Ap 6 2
-3 E -03/-02 1996
0 - GMT
Z America/Scoresbysund -1:27:52 - LMT 1916 Jul 28
-2 - -02 1980 Ap 6 2
-2 c -02/-01 1981 Mar 29
-1 E -01/+00
Z America/Nuuk -3:26:56 - LMT 1916 Jul 28
-3 - -03 1980 Ap 6 2
-3 E -03/-02
Z America/Thule -4:35:8 - LMT 1916 Jul 28
-4 Th A%sT
Z Europe/Tallinn 1:39 - LMT 1880
1:39 - TMT 1918 F
1 c CE%sT 1919 Jul
1:39 - TMT 1921 May
2 - EET 1940 Au 6
3 - MSK 1941 S 15
1 c CE%sT 1944 S 22
3 R MSK/MSD 1989 Mar 26 2s
2 1 EEST 1989 S 24 2s
2 c EE%sT 1998 S 22
2 E EE%sT 1999 O 31 4
2 - EET 2002 F 21
2 E EE%sT
R FI 1942 o - Ap 2 24 1 S
R FI 1942 o - O 4 1 0 -
R FI 1981 1982 - Mar lastSu 2 1 S
R FI 1981 1982 - S lastSu 3 0 -
Z Europe/Helsinki 1:39:49 - LMT 1878 May 31
1:39:49 - HMT 1921 May
2 FI EE%sT 1983
2 E EE%sT
L Europe/Helsinki Europe/Mariehamn
R F 1916 o - Jun 14 23s 1 S
R F 1916 1919 - O Su>=1 23s 0 -
R F 1917 o - Mar 24 23s 1 S
R F 1918 o - Mar 9 23s 1 S
R F 1919 o - Mar 1 23s 1 S
R F 1920 o - F 14 23s 1 S
R F 1920 o - O 23 23s 0 -
R F 1921 o - Mar 14 23s 1 S
R F 1921 o - O 25 23s 0 -
R F 1922 o - Mar 25 23s 1 S
R F 1922 1938 - O Sa>=1 23s 0 -
R F 1923 o - May 26 23s 1 S
R F 1924 o - Mar 29 23s 1 S
R F 1925 o - Ap 4 23s 1 S
R F 1926 o - Ap 17 23s 1 S
R F 1927 o - Ap 9 23s 1 S
R F 1928 o - Ap 14 23s 1 S
R F 1929 o - Ap 20 23s 1 S
R F 1930 o - Ap 12 23s 1 S
R F 1931 o - Ap 18 23s 1 S
R F 1932 o - Ap 2 23s 1 S
R F 1933 o - Mar 25 23s 1 S
R F 1934 o - Ap 7 23s 1 S
R F 1935 o - Mar 30 23s 1 S
R F 1936 o - Ap 18 23s 1 S
R F 1937 o - Ap 3 23s 1 S
R F 1938 o - Mar 26 23s 1 S
R F 1939 o - Ap 15 23s 1 S
R F 1939 o - N 18 23s 0 -
R F 1940 o - F 25 2 1 S
R F 1941 o - May 5 0 2 M
R F 1941 o - O 6 0 1 S
R F 1942 o - Mar 9 0 2 M
R F 1942 o - N 2 3 1 S
R F 1943 o - Mar 29 2 2 M
R F 1943 o - O 4 3 1 S
R F 1944 o - Ap 3 2 2 M
R F 1944 o - O 8 1 1 S
R F 1945 o - Ap 2 2 2 M
R F 1945 o - S 16 3 0 -
R F 1976 o - Mar 28 1 1 S
R F 1976 o - S 26 1 0 -
Z Europe/Paris 0:9:21 - LMT 1891 Mar 15 0:1
0:9:21 - PMT 1911 Mar 11 0:1
0 F WE%sT 1940 Jun 14 23
1 c CE%sT 1944 Au 25
0 F WE%sT 1945 S 16 3
1 F CE%sT 1977
1 E CE%sT
R DE 1946 o - Ap 14 2s 1 S
R DE 1946 o - O 7 2s 0 -
R DE 1947 1949 - O Su>=1 2s 0 -
R DE 1947 o - Ap 6 3s 1 S
R DE 1947 o - May 11 2s 2 M
R DE 1947 o - Jun 29 3 1 S
R DE 1948 o - Ap 18 2s 1 S
R DE 1949 o - Ap 10 2s 1 S
R So 1945 o - May 24 2 2 M
R So 1945 o - S 24 3 1 S
R So 1945 o - N 18 2s 0 -
Z Europe/Berlin 0:53:28 - LMT 1893 Ap
1 c CE%sT 1945 May 24 2
1 So CE%sT 1946
1 DE CE%sT 1980
1 E CE%sT
L Europe/Zurich Europe/Busingen
Z Europe/Gibraltar -0:21:24 - LMT 1880 Au 2 0s
0 G %s 1957 Ap 14 2
1 - CET 1982
1 E CE%sT
R g 1932 o - Jul 7 0 1 S
R g 1932 o - S 1 0 0 -
R g 1941 o - Ap 7 0 1 S
R g 1942 o - N 2 3 0 -
R g 1943 o - Mar 30 0 1 S
R g 1943 o - O 4 0 0 -
R g 1952 o - Jul 1 0 1 S
R g 1952 o - N 2 0 0 -
R g 1975 o - Ap 12 0s 1 S
R g 1975 o - N 26 0s 0 -
R g 1976 o - Ap 11 2s 1 S
R g 1976 o - O 10 2s 0 -
R g 1977 1978 - Ap Su>=1 2s 1 S
R g 1977 o - S 26 2s 0 -
R g 1978 o - S 24 4 0 -
R g 1979 o - Ap 1 9 1 S
R g 1979 o - S 29 2 0 -
R g 1980 o - Ap 1 0 1 S
R g 1980 o - S 28 0 0 -
Z Europe/Athens 1:34:52 - LMT 1895 S 14
1:34:52 - AMT 1916 Jul 28 0:1
2 g EE%sT 1941 Ap 30
1 g CE%sT 1944 Ap 4
2 g EE%sT 1981
2 E EE%sT
R h 1918 o - Ap 1 3 1 S
R h 1918 o - S 16 3 0 -
R h 1919 o - Ap 15 3 1 S
R h 1919 o - N 24 3 0 -
R h 1945 o - May 1 23 1 S
R h 1945 o - N 1 0 0 -
R h 1946 o - Mar 31 2s 1 S
R h 1946 1949 - O Su>=1 2s 0 -
R h 1947 1949 - Ap Su>=4 2s 1 S
R h 1950 o - Ap 17 2s 1 S
R h 1950 o - O 23 2s 0 -
R h 1954 1955 - May 23 0 1 S
R h 1954 1955 - O 3 0 0 -
R h 1956 o - Jun Su>=1 0 1 S
R h 1956 o - S lastSu 0 0 -
R h 1957 o - Jun Su>=1 1 1 S
R h 1957 o - S lastSu 3 0 -
R h 1980 o - Ap 6 1 1 S
Z Europe/Budapest 1:16:20 - LMT 1890 O
1 c CE%sT 1918
1 h CE%sT 1941 Ap 8
1 c CE%sT 1945
1 h CE%sT 1980 S 28 2s
1 E CE%sT
R w 1917 1919 - F 19 23 1 -
R w 1917 o - O 21 1 0 -
R w 1918 1919 - N 16 1 0 -
R w 1921 o - Mar 19 23 1 -
R w 1921 o - Jun 23 1 0 -
R w 1939 o - Ap 29 23 1 -
R w 1939 o - O 29 2 0 -
R w 1940 o - F 25 2 1 -
R w 1940 1941 - N Su>=2 1s 0 -
R w 1941 1942 - Mar Su>=2 1s 1 -
R w 1943 1946 - Mar Su>=1 1s 1 -
R w 1942 1948 - O Su>=22 1s 0 -
R w 1947 1967 - Ap Su>=1 1s 1 -
R w 1949 o - O 30 1s 0 -
R w 1950 1966 - O Su>=22 1s 0 -
R w 1967 o - O 29 1s 0 -
Z Atlantic/Reykjavik -1:28 - LMT 1908
-1 w -01/+00 1968 Ap 7 1s
0 - GMT
R I 1916 o - Jun 3 24 1 S
R I 1916 1917 - S 30 24 0 -
R I 1917 o - Mar 31 24 1 S
R I 1918 o - Mar 9 24 1 S
R I 1918 o - O 6 24 0 -
R I 1919 o - Mar 1 24 1 S
R I 1919 o - O 4 24 0 -
R I 1920 o - Mar 20 24 1 S
R I 1920 o - S 18 24 0 -
R I 1940 o - Jun 14 24 1 S
R I 1942 o - N 2 2s 0 -
R I 1943 o - Mar 29 2s 1 S
R I 1943 o - O 4 2s 0 -
R I 1944 o - Ap 2 2s 1 S
R I 1944 o - S 17 2s 0 -
R I 1945 o - Ap 2 2 1 S
R I 1945 o - S 15 1 0 -
R I 1946 o - Mar 17 2s 1 S
R I 1946 o - O 6 2s 0 -
R I 1947 o - Mar 16 0s 1 S
R I 1947 o - O 5 0s 0 -
R I 1948 o - F 29 2s 1 S
R I 1948 o - O 3 2s 0 -
R I 1966 1968 - May Su>=22 0s 1 S
R I 1966 o - S 24 24 0 -
R I 1967 1969 - S Su>=22 0s 0 -
R I 1969 o - Jun 1 0s 1 S
R I 1970 o - May 31 0s 1 S
R I 1970 o - S lastSu 0s 0 -
R I 1971 1972 - May Su>=22 0s 1 S
R I 1971 o - S lastSu 0s 0 -
R I 1972 o - O 1 0s 0 -
R I 1973 o - Jun 3 0s 1 S
R I 1973 1974 - S lastSu 0s 0 -
R I 1974 o - May 26 0s 1 S
R I 1975 o - Jun 1 0s 1 S
R I 1975 1977 - S lastSu 0s 0 -
R I 1976 o - May 30 0s 1 S
R I 1977 1979 - May Su>=22 0s 1 S
R I 1978 o - O 1 0s 0 -
R I 1979 o - S 30 0s 0 -
Z Europe/Rome 0:49:56 - LMT 1866 D 12
0:49:56 - RMT 1893 O 31 23:49:56
1 I CE%sT 1943 S 10
1 c CE%sT 1944 Jun 4
1 I CE%sT 1980
1 E CE%sT
L Europe/Rome Europe/Vatican
L Europe/Rome Europe/San_Marino
R LV 1989 1996 - Mar lastSu 2s 1 S
R LV 1989 1996 - S lastSu 2s 0 -
Z Europe/Riga 1:36:34 - LMT 1880
1:36:34 - RMT 1918 Ap 15 2
1:36:34 1 LST 1918 S 16 3
1:36:34 - RMT 1919 Ap 1 2
1:36:34 1 LST 1919 May 22 3
1:36:34 - RMT 1926 May 11
2 - EET 1940 Au 5
3 - MSK 1941 Jul
1 c CE%sT 1944 O 13
3 R MSK/MSD 1989 Mar lastSu 2s
2 1 EEST 1989 S lastSu 2s
2 LV EE%sT 1997 Ja 21
2 E EE%sT 2000 F 29
2 - EET 2001 Ja 2
2 E EE%sT
L Europe/Zurich Europe/Vaduz
Z Europe/Vilnius 1:41:16 - LMT 1880
1:24 - WMT 1917
1:35:36 - KMT 1919 O 10
1 - CET 1920 Jul 12
2 - EET 1920 O 9
1 - CET 1940 Au 3
3 - MSK 1941 Jun 24
1 c CE%sT 1944 Au
3 R MSK/MSD 1989 Mar 26 2s
2 R EE%sT 1991 S 29 2s
2 c EE%sT 1998
2 - EET 1998 Mar 29 1u
1 E CE%sT 1999 O 31 1u
2 - EET 2003
2 E EE%sT
R LX 1916 o - May 14 23 1 S
R LX 1916 o - O 1 1 0 -
R LX 1917 o - Ap 28 23 1 S
R LX 1917 o - S 17 1 0 -
R LX 1918 o - Ap M>=15 2s 1 S
R LX 1918 o - S M>=15 2s 0 -
R LX 1919 o - Mar 1 23 1 S
R LX 1919 o - O 5 3 0 -
R LX 1920 o - F 14 23 1 S
R LX 1920 o - O 24 2 0 -
R LX 1921 o - Mar 14 23 1 S
R LX 1921 o - O 26 2 0 -
R LX 1922 o - Mar 25 23 1 S
R LX 1922 o - O Su>=2 1 0 -
R LX 1923 o - Ap 21 23 1 S
R LX 1923 o - O Su>=2 2 0 -
R LX 1924 o - Mar 29 23 1 S
R LX 1924 1928 - O Su>=2 1 0 -
R LX 1925 o - Ap 5 23 1 S
R LX 1926 o - Ap 17 23 1 S
R LX 1927 o - Ap 9 23 1 S
R LX 1928 o - Ap 14 23 1 S
R LX 1929 o - Ap 20 23 1 S
Z Europe/Luxembourg 0:24:36 - LMT 1904 Jun
1 LX CE%sT 1918 N 25
0 LX WE%sT 1929 O 6 2s
0 b WE%sT 1940 May 14 3
1 c WE%sT 1944 S 18 3
1 b CE%sT 1977
1 E CE%sT
R MT 1973 o - Mar 31 0s 1 S
R MT 1973 o - S 29 0s 0 -
R MT 1974 o - Ap 21 0s 1 S
R MT 1974 o - S 16 0s 0 -
R MT 1975 1979 - Ap Su>=15 2 1 S
R MT 1975 1980 - S Su>=15 2 0 -
R MT 1980 o - Mar 31 2 1 S
Z Europe/Malta 0:58:4 - LMT 1893 N 2 0s
1 I CE%sT 1973 Mar 31
1 MT CE%sT 1981
1 E CE%sT
R MD 1997 ma - Mar lastSu 2 1 S
R MD 1997 ma - O lastSu 3 0 -
Z Europe/Chisinau 1:55:20 - LMT 1880
1:55 - CMT 1918 F 15
1:44:24 - BMT 1931 Jul 24
2 z EE%sT 1940 Au 15
2 1 EEST 1941 Jul 17
1 c CE%sT 1944 Au 24
3 R MSK/MSD 1990 May 6 2
2 R EE%sT 1992
2 e EE%sT 1997
2 MD EE%sT
Z Europe/Monaco 0:29:32 - LMT 1891 Mar 15
0:9:21 - PMT 1911 Mar 11
0 F WE%sT 1945 S 16 3
1 F CE%sT 1977
1 E CE%sT
R N 1916 o - May 1 0 1 NST
R N 1916 o - O 1 0 0 AMT
R N 1917 o - Ap 16 2s 1 NST
R N 1917 o - S 17 2s 0 AMT
R N 1918 1921 - Ap M>=1 2s 1 NST
R N 1918 1921 - S lastM 2s 0 AMT
R N 1922 o - Mar lastSu 2s 1 NST
R N 1922 1936 - O Su>=2 2s 0 AMT
R N 1923 o - Jun F>=1 2s 1 NST
R N 1924 o - Mar lastSu 2s 1 NST
R N 1925 o - Jun F>=1 2s 1 NST
R N 1926 1931 - May 15 2s 1 NST
R N 1932 o - May 22 2s 1 NST
R N 1933 1936 - May 15 2s 1 NST
R N 1937 o - May 22 2s 1 NST
R N 1937 o - Jul 1 0 1 S
R N 1937 1939 - O Su>=2 2s 0 -
R N 1938 1939 - May 15 2s 1 S
R N 1945 o - Ap 2 2s 1 S
R N 1945 o - S 16 2s 0 -
Z Europe/Amsterdam 0:19:32 - LMT 1835
0:19:32 N %s 1937 Jul
0:20 N +0020/+0120 1940 May 16
1 c CE%sT 1945 Ap 2 2
1 N CE%sT 1977
1 E CE%sT
R NO 1916 o - May 22 1 1 S
R NO 1916 o - S 30 0 0 -
R NO 1945 o - Ap 2 2s 1 S
R NO 1945 o - O 1 2s 0 -
R NO 1959 1964 - Mar Su>=15 2s 1 S
R NO 1959 1965 - S Su>=15 2s 0 -
R NO 1965 o - Ap 25 2s 1 S
Z Europe/Oslo 0:43 - LMT 1895
1 NO CE%sT 1940 Au 10 23
1 c CE%sT 1945 Ap 2 2
1 NO CE%sT 1980
1 E CE%sT
L Europe/Oslo Arctic/Longyearbyen
R O 1918 1919 - S 16 2s 0 -
R O 1919 o - Ap 15 2s 1 S
R O 1944 o - Ap 3 2s 1 S
R O 1944 o - O 4 2 0 -
R O 1945 o - Ap 29 0 1 S
R O 1945 o - N 1 0 0 -
R O 1946 o - Ap 14 0s 1 S
R O 1946 o - O 7 2s 0 -
R O 1947 o - May 4 2s 1 S
R O 1947 1949 - O Su>=1 2s 0 -
R O 1948 o - Ap 18 2s 1 S
R O 1949 o - Ap 10 2s 1 S
R O 1957 o - Jun 2 1s 1 S
R O 1957 1958 - S lastSu 1s 0 -
R O 1958 o - Mar 30 1s 1 S
R O 1959 o - May 31 1s 1 S
R O 1959 1961 - O Su>=1 1s 0 -
R O 1960 o - Ap 3 1s 1 S
R O 1961 1964 - May lastSu 1s 1 S
R O 1962 1964 - S lastSu 1s 0 -
Z Europe/Warsaw 1:24 - LMT 1880
1:24 - WMT 1915 Au 5
1 c CE%sT 1918 S 16 3
2 O EE%sT 1922 Jun
1 O CE%sT 1940 Jun 23 2
1 c CE%sT 1944 O
1 O CE%sT 1977
1 W- CE%sT 1988
1 E CE%sT
R p 1916 o - Jun 17 23 1 S
R p 1916 o - N 1 1 0 -
R p 1917 o - F 28 23s 1 S
R p 1917 1921 - O 14 23s 0 -
R p 1918 o - Mar 1 23s 1 S
R p 1919 o - F 28 23s 1 S
R p 1920 o - F 29 23s 1 S
R p 1921 o - F 28 23s 1 S
R p 1924 o - Ap 16 23s 1 S
R p 1924 o - O 14 23s 0 -
R p 1926 o - Ap 17 23s 1 S
R p 1926 1929 - O Sa>=1 23s 0 -
R p 1927 o - Ap 9 23s 1 S
R p 1928 o - Ap 14 23s 1 S
R p 1929 o - Ap 20 23s 1 S
R p 1931 o - Ap 18 23s 1 S
R p 1931 1932 - O Sa>=1 23s 0 -
R p 1932 o - Ap 2 23s 1 S
R p 1934 o - Ap 7 23s 1 S
R p 1934 1938 - O Sa>=1 23s 0 -
R p 1935 o - Mar 30 23s 1 S
R p 1936 o - Ap 18 23s 1 S
R p 1937 o - Ap 3 23s 1 S
R p 1938 o - Mar 26 23s 1 S
R p 1939 o - Ap 15 23s 1 S
R p 1939 o - N 18 23s 0 -
R p 1940 o - F 24 23s 1 S
R p 1940 1941 - O 5 23s 0 -
R p 1941 o - Ap 5 23s 1 S
R p 1942 1945 - Mar Sa>=8 23s 1 S
R p 1942 o - Ap 25 22s 2 M
R p 1942 o - Au 15 22s 1 S
R p 1942 1945 - O Sa>=24 23s 0 -
R p 1943 o - Ap 17 22s 2 M
R p 1943 1945 - Au Sa>=25 22s 1 S
R p 1944 1945 - Ap Sa>=21 22s 2 M
R p 1946 o - Ap Sa>=1 23s 1 S
R p 1946 o - O Sa>=1 23s 0 -
R p 1947 1949 - Ap Su>=1 2s 1 S
R p 1947 1949 - O Su>=1 2s 0 -
R p 1951 1965 - Ap Su>=1 2s 1 S
R p 1951 1965 - O Su>=1 2s 0 -
R p 1977 o - Mar 27 0s 1 S
R p 1977 o - S 25 0s 0 -
R p 1978 1979 - Ap Su>=1 0s 1 S
R p 1978 o - O 1 0s 0 -
R p 1979 1982 - S lastSu 1s 0 -
R p 1980 o - Mar lastSu 0s 1 S
R p 1981 1982 - Mar lastSu 1s 1 S
R p 1983 o - Mar lastSu 2s 1 S
Z Europe/Lisbon -0:36:45 - LMT 1884
-0:36:45 - LMT 1912 Ja 1 0u
0 p WE%sT 1966 Ap 3 2
1 - CET 1976 S 26 1
0 p WE%sT 1983 S 25 1s
0 W- WE%sT 1992 S 27 1s
1 E CE%sT 1996 Mar 31 1u
0 E WE%sT
Z Atlantic/Azores -1:42:40 - LMT 1884
-1:54:32 - HMT 1912 Ja 1 2u
-2 p -02/-01 1942 Ap 25 22s
-2 p +00 1942 Au 15 22s
-2 p -02/-01 1943 Ap 17 22s
-2 p +00 1943 Au 28 22s
-2 p -02/-01 1944 Ap 22 22s
-2 p +00 1944 Au 26 22s
-2 p -02/-01 1945 Ap 21 22s
-2 p +00 1945 Au 25 22s
-2 p -02/-01 1966 Ap 3 2
-1 p -01/+00 1983 S 25 1s
-1 W- -01/+00 1992 S 27 1s
0 E WE%sT 1993 Mar 28 1u
-1 E -01/+00
Z Atlantic/Madeira -1:7:36 - LMT 1884
-1:7:36 - FMT 1912 Ja 1 1u
-1 p -01/+00 1942 Ap 25 22s
-1 p +01 1942 Au 15 22s
-1 p -01/+00 1943 Ap 17 22s
-1 p +01 1943 Au 28 22s
-1 p -01/+00 1944 Ap 22 22s
-1 p +01 1944 Au 26 22s
-1 p -01/+00 1945 Ap 21 22s
-1 p +01 1945 Au 25 22s
-1 p -01/+00 1966 Ap 3 2
0 p WE%sT 1983 S 25 1s
0 E WE%sT
R z 1932 o - May 21 0s 1 S
R z 1932 1939 - O Su>=1 0s 0 -
R z 1933 1939 - Ap Su>=2 0s 1 S
R z 1979 o - May 27 0 1 S
R z 1979 o - S lastSu 0 0 -
R z 1980 o - Ap 5 23 1 S
R z 1980 o - S lastSu 1 0 -
R z 1991 1993 - Mar lastSu 0s 1 S
R z 1991 1993 - S lastSu 0s 0 -
Z Europe/Bucharest 1:44:24 - LMT 1891 O
1:44:24 - BMT 1931 Jul 24
2 z EE%sT 1981 Mar 29 2s
2 c EE%sT 1991
2 z EE%sT 1994
2 e EE%sT 1997
2 E EE%sT
Z Europe/Kaliningrad 1:22 - LMT 1893 Ap
1 c CE%sT 1945 Ap 10
2 O EE%sT 1946 Ap 7
3 R MSK/MSD 1989 Mar 26 2s
2 R EE%sT 2011 Mar 27 2s
3 - +03 2014 O 26 2s
2 - EET
Z Europe/Moscow 2:30:17 - LMT 1880
2:30:17 - MMT 1916 Jul 3
2:31:19 R %s 1919 Jul 1 0u
3 R %s 1921 O
3 R MSK/MSD 1922 O
2 - EET 1930 Jun 21
3 R MSK/MSD 1991 Mar 31 2s
2 R EE%sT 1992 Ja 19 2s
3 R MSK/MSD 2011 Mar 27 2s
4 - MSK 2014 O 26 2s
3 - MSK
Z Europe/Simferopol 2:16:24 - LMT 1880
2:16 - SMT 1924 May 2
2 - EET 1930 Jun 21
3 - MSK 1941 N
1 c CE%sT 1944 Ap 13
3 R MSK/MSD 1990
3 - MSK 1990 Jul 1 2
2 - EET 1992
2 e EE%sT 1994 May
3 e MSK/MSD 1996 Mar 31 0s
3 1 MSD 1996 O 27 3s
3 R MSK/MSD 1997
3 - MSK 1997 Mar lastSu 1u
2 E EE%sT 2014 Mar 30 2
4 - MSK 2014 O 26 2s
3 - MSK
Z Europe/Astrakhan 3:12:12 - LMT 1924 May
3 - +03 1930 Jun 21
4 R +04/+05 1989 Mar 26 2s
3 R +03/+04 1991 Mar 31 2s
4 - +04 1992 Mar 29 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04 2014 O 26 2s
3 - +03 2016 Mar 27 2s
4 - +04
Z Europe/Volgograd 2:57:40 - LMT 1920 Ja 3
3 - +03 1930 Jun 21
4 - +04 1961 N 11
4 R +04/+05 1988 Mar 27 2s
3 R +03/+04 1991 Mar 31 2s
4 - +04 1992 Mar 29 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04 2014 O 26 2s
3 - +03 2018 O 28 2s
4 - +04
Z Europe/Saratov 3:4:18 - LMT 1919 Jul 1 0u
3 - +03 1930 Jun 21
4 R +04/+05 1988 Mar 27 2s
3 R +03/+04 1991 Mar 31 2s
4 - +04 1992 Mar 29 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04 2014 O 26 2s
3 - +03 2016 D 4 2s
4 - +04
Z Europe/Kirov 3:18:48 - LMT 1919 Jul 1 0u
3 - +03 1930 Jun 21
4 R +04/+05 1989 Mar 26 2s
3 R +03/+04 1991 Mar 31 2s
4 - +04 1992 Mar 29 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04 2014 O 26 2s
3 - +03
Z Europe/Samara 3:20:20 - LMT 1919 Jul 1 0u
3 - +03 1930 Jun 21
4 - +04 1935 Ja 27
4 R +04/+05 1989 Mar 26 2s
3 R +03/+04 1991 Mar 31 2s
2 R +02/+03 1991 S 29 2s
3 - +03 1991 O 20 3
4 R +04/+05 2010 Mar 28 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04
Z Europe/Ulyanovsk 3:13:36 - LMT 1919 Jul 1 0u
3 - +03 1930 Jun 21
4 R +04/+05 1989 Mar 26 2s
3 R +03/+04 1991 Mar 31 2s
2 R +02/+03 1992 Ja 19 2s
3 R +03/+04 2011 Mar 27 2s
4 - +04 2014 O 26 2s
3 - +03 2016 Mar 27 2s
4 - +04
Z Asia/Yekaterinburg 4:2:33 - LMT 1916 Jul 3
3:45:5 - PMT 1919 Jul 15 4
4 - +04 1930 Jun 21
5 R +05/+06 1991 Mar 31 2s
4 R +04/+05 1992 Ja 19 2s
5 R +05/+06 2011 Mar 27 2s
6 - +06 2014 O 26 2s
5 - +05
Z Asia/Omsk 4:53:30 - LMT 1919 N 14
5 - +05 1930 Jun 21
6 R +06/+07 1991 Mar 31 2s
5 R +05/+06 1992 Ja 19 2s
6 R +06/+07 2011 Mar 27 2s
7 - +07 2014 O 26 2s
6 - +06
Z Asia/Barnaul 5:35 - LMT 1919 D 10
6 - +06 1930 Jun 21
7 R +07/+08 1991 Mar 31 2s
6 R +06/+07 1992 Ja 19 2s
7 R +07/+08 1995 May 28
6 R +06/+07 2011 Mar 27 2s
7 - +07 2014 O 26 2s
6 - +06 2016 Mar 27 2s
7 - +07
Z Asia/Novosibirsk 5:31:40 - LMT 1919 D 14 6
6 - +06 1930 Jun 21
7 R +07/+08 1991 Mar 31 2s
6 R +06/+07 1992 Ja 19 2s
7 R +07/+08 1993 May 23
6 R +06/+07 2011 Mar 27 2s
7 - +07 2014 O 26 2s
6 - +06 2016 Jul 24 2s
7 - +07
Z Asia/Tomsk 5:39:51 - LMT 1919 D 22
6 - +06 1930 Jun 21
7 R +07/+08 1991 Mar 31 2s
6 R +06/+07 1992 Ja 19 2s
7 R +07/+08 2002 May 1 3
6 R +06/+07 2011 Mar 27 2s
7 - +07 2014 O 26 2s
6 - +06 2016 May 29 2s
7 - +07
Z Asia/Novokuznetsk 5:48:48 - LMT 1924 May
6 - +06 1930 Jun 21
7 R +07/+08 1991 Mar 31 2s
6 R +06/+07 1992 Ja 19 2s
7 R +07/+08 2010 Mar 28 2s
6 R +06/+07 2011 Mar 27 2s
7 - +07
Z Asia/Krasnoyarsk 6:11:26 - LMT 1920 Ja 6
6 - +06 1930 Jun 21
7 R +07/+08 1991 Mar 31 2s
6 R +06/+07 1992 Ja 19 2s
7 R +07/+08 2011 Mar 27 2s
8 - +08 2014 O 26 2s
7 - +07
Z Asia/Irkutsk 6:57:5 - LMT 1880
6:57:5 - IMT 1920 Ja 25
7 - +07 1930 Jun 21
8 R +08/+09 1991 Mar 31 2s
7 R +07/+08 1992 Ja 19 2s
8 R +08/+09 2011 Mar 27 2s
9 - +09 2014 O 26 2s
8 - +08
Z Asia/Chita 7:33:52 - LMT 1919 D 15
8 - +08 1930 Jun 21
9 R +09/+10 1991 Mar 31 2s
8 R +08/+09 1992 Ja 19 2s
9 R +09/+10 2011 Mar 27 2s
10 - +10 2014 O 26 2s
8 - +08 2016 Mar 27 2
9 - +09
Z Asia/Yakutsk 8:38:58 - LMT 1919 D 15
8 - +08 1930 Jun 21
9 R +09/+10 1991 Mar 31 2s
8 R +08/+09 1992 Ja 19 2s
9 R +09/+10 2011 Mar 27 2s
10 - +10 2014 O 26 2s
9 - +09
Z Asia/Vladivostok 8:47:31 - LMT 1922 N 15
9 - +09 1930 Jun 21
10 R +10/+11 1991 Mar 31 2s
9 R +09/+10 1992 Ja 19 2s
10 R +10/+11 2011 Mar 27 2s
11 - +11 2014 O 26 2s
10 - +10
Z Asia/Khandyga 9:2:13 - LMT 1919 D 15
8 - +08 1930 Jun 21
9 R +09/+10 1991 Mar 31 2s
8 R +08/+09 1992 Ja 19 2s
9 R +09/+10 2004
10 R +10/+11 2011 Mar 27 2s
11 - +11 2011 S 13 0s
10 - +10 2014 O 26 2s
9 - +09
Z Asia/Sakhalin 9:30:48 - LMT 1905 Au 23
9 - +09 1945 Au 25
11 R +11/+12 1991 Mar 31 2s
10 R +10/+11 1992 Ja 19 2s
11 R +11/+12 1997 Mar lastSu 2s
10 R +10/+11 2011 Mar 27 2s
11 - +11 2014 O 26 2s
10 - +10 2016 Mar 27 2s
11 - +11
Z Asia/Magadan 10:3:12 - LMT 1924 May 2
10 - +10 1930 Jun 21
11 R +11/+12 1991 Mar 31 2s
10 R +10/+11 1992 Ja 19 2s
11 R +11/+12 2011 Mar 27 2s
12 - +12 2014 O 26 2s
10 - +10 2016 Ap 24 2s
11 - +11
Z Asia/Srednekolymsk 10:14:52 - LMT 1924 May 2
10 - +10 1930 Jun 21
11 R +11/+12 1991 Mar 31 2s
10 R +10/+11 1992 Ja 19 2s
11 R +11/+12 2011 Mar 27 2s
12 - +12 2014 O 26 2s
11 - +11
Z Asia/Ust-Nera 9:32:54 - LMT 1919 D 15
8 - +08 1930 Jun 21
9 R +09/+10 1981 Ap
11 R +11/+12 1991 Mar 31 2s
10 R +10/+11 1992 Ja 19 2s
11 R +11/+12 2011 Mar 27 2s
12 - +12 2011 S 13 0s
11 - +11 2014 O 26 2s
10 - +10
Z Asia/Kamchatka 10:34:36 - LMT 1922 N 10
11 - +11 1930 Jun 21
12 R +12/+13 1991 Mar 31 2s
11 R +11/+12 1992 Ja 19 2s
12 R +12/+13 2010 Mar 28 2s
11 R +11/+12 2011 Mar 27 2s
12 - +12
Z Asia/Anadyr 11:49:56 - LMT 1924 May 2
12 - +12 1930 Jun 21
13 R +13/+14 1982 Ap 1 0s
12 R +12/+13 1991 Mar 31 2s
11 R +11/+12 1992 Ja 19 2s
12 R +12/+13 2010 Mar 28 2s
11 R +11/+12 2011 Mar 27 2s
12 - +12
Z Europe/Belgrade 1:22 - LMT 1884
1 - CET 1941 Ap 18 23
1 c CE%sT 1945
1 - CET 1945 May 8 2s
1 1 CEST 1945 S 16 2s
1 - CET 1982 N 27
1 E CE%sT
L Europe/Belgrade Europe/Ljubljana
L Europe/Belgrade Europe/Podgorica
L Europe/Belgrade Europe/Sarajevo
L Europe/Belgrade Europe/Skopje
L Europe/Belgrade Europe/Zagreb
L Europe/Prague Europe/Bratislava
R s 1918 o - Ap 15 23 1 S
R s 1918 1919 - O 6 24s 0 -
R s 1919 o - Ap 6 23 1 S
R s 1924 o - Ap 16 23 1 S
R s 1924 o - O 4 24s 0 -
R s 1926 o - Ap 17 23 1 S
R s 1926 1929 - O Sa>=1 24s 0 -
R s 1927 o - Ap 9 23 1 S
R s 1928 o - Ap 15 0 1 S
R s 1929 o - Ap 20 23 1 S
R s 1937 o - Jun 16 23 1 S
R s 1937 o - O 2 24s 0 -
R s 1938 o - Ap 2 23 1 S
R s 1938 o - Ap 30 23 2 M
R s 1938 o - O 2 24 1 S
R s 1939 o - O 7 24s 0 -
R s 1942 o - May 2 23 1 S
R s 1942 o - S 1 1 0 -
R s 1943 1946 - Ap Sa>=13 23 1 S
R s 1943 1944 - O Su>=1 1 0 -
R s 1945 1946 - S lastSu 1 0 -
R s 1949 o - Ap 30 23 1 S
R s 1949 o - O 2 1 0 -
R s 1974 1975 - Ap Sa>=12 23 1 S
R s 1974 1975 - O Su>=1 1 0 -
R s 1976 o - Mar 27 23 1 S
R s 1976 1977 - S lastSu 1 0 -
R s 1977 o - Ap 2 23 1 S
R s 1978 o - Ap 2 2s 1 S
R s 1978 o - O 1 2s 0 -
R Sp 1967 o - Jun 3 12 1 S
R Sp 1967 o - O 1 0 0 -
R Sp 1974 o - Jun 24 0 1 S
R Sp 1974 o - S 1 0 0 -
R Sp 1976 1977 - May 1 0 1 S
R Sp 1976 o - Au 1 0 0 -
R Sp 1977 o - S 28 0 0 -
R Sp 1978 o - Jun 1 0 1 S
R Sp 1978 o - Au 4 0 0 -
Z Europe/Madrid -0:14:44 - LMT 1900 D 31 23:45:16
0 s WE%sT 1940 Mar 16 23
1 s CE%sT 1979
1 E CE%sT
Z Africa/Ceuta -0:21:16 - LMT 1900 D 31 23:38:44
0 - WET 1918 May 6 23
0 1 WEST 1918 O 7 23
0 - WET 1924
0 s WE%sT 1929
0 - WET 1967
0 Sp WE%sT 1984 Mar 16
1 - CET 1986
1 E CE%sT
Z Atlantic/Canary -1:1:36 - LMT 1922 Mar
-1 - -01 1946 S 30 1
0 - WET 1980 Ap 6 0s
0 1 WEST 1980 S 28 1u
0 E WE%sT
Z Europe/Stockholm 1:12:12 - LMT 1879
1:0:14 - SET 1900
1 - CET 1916 May 14 23
1 1 CEST 1916 O 1 1
1 - CET 1980
1 E CE%sT
R CH 1941 1942 - May M>=1 1 1 S
R CH 1941 1942 - O M>=1 2 0 -
Z Europe/Zurich 0:34:8 - LMT 1853 Jul 16
0:29:46 - BMT 1894 Jun
1 CH CE%sT 1981
1 E CE%sT
R T 1916 o - May 1 0 1 S
R T 1916 o - O 1 0 0 -
R T 1920 o - Mar 28 0 1 S
R T 1920 o - O 25 0 0 -
R T 1921 o - Ap 3 0 1 S
R T 1921 o - O 3 0 0 -
R T 1922 o - Mar 26 0 1 S
R T 1922 o - O 8 0 0 -
R T 1924 o - May 13 0 1 S
R T 1924 1925 - O 1 0 0 -
R T 1925 o - May 1 0 1 S
R T 1940 o - Jul 1 0 1 S
R T 1940 o - O 6 0 0 -
R T 1940 o - D 1 0 1 S
R T 1941 o - S 21 0 0 -
R T 1942 o - Ap 1 0 1 S
R T 1945 o - O 8 0 0 -
R T 1946 o - Jun 1 0 1 S
R T 1946 o - O 1 0 0 -
R T 1947 1948 - Ap Su>=16 0 1 S
R T 1947 1951 - O Su>=2 0 0 -
R T 1949 o - Ap 10 0 1 S
R T 1950 o - Ap 16 0 1 S
R T 1951 o - Ap 22 0 1 S
R T 1962 o - Jul 15 0 1 S
R T 1963 o - O 30 0 0 -
R T 1964 o - May 15 0 1 S
R T 1964 o - O 1 0 0 -
R T 1973 o - Jun 3 1 1 S
R T 1973 1976 - O Su>=31 2 0 -
R T 1974 o - Mar 31 2 1 S
R T 1975 o - Mar 22 2 1 S
R T 1976 o - Mar 21 2 1 S
R T 1977 1978 - Ap Su>=1 2 1 S
R T 1977 1978 - O Su>=15 2 0 -
R T 1978 o - Jun 29 0 0 -
R T 1983 o - Jul 31 2 1 S
R T 1983 o - O 2 2 0 -
R T 1985 o - Ap 20 1s 1 S
R T 1985 o - S 28 1s 0 -
R T 1986 1993 - Mar lastSu 1s 1 S
R T 1986 1995 - S lastSu 1s 0 -
R T 1994 o - Mar 20 1s 1 S
R T 1995 2006 - Mar lastSu 1s 1 S
R T 1996 2006 - O lastSu 1s 0 -
Z Europe/Istanbul 1:55:52 - LMT 1880
1:56:56 - IMT 1910 O
2 T EE%sT 1978 Jun 29
3 T +03/+04 1984 N 1 2
2 T EE%sT 2007
2 E EE%sT 2011 Mar 27 1u
2 - EET 2011 Mar 28 1u
2 E EE%sT 2014 Mar 30 1u
2 - EET 2014 Mar 31 1u
2 E EE%sT 2015 O 25 1u
2 1 EEST 2015 N 8 1u
2 E EE%sT 2016 S 7
3 - +03
L Europe/Istanbul Asia/Istanbul
Z Europe/Kiev 2:2:4 - LMT 1880
2:2:4 - KMT 1924 May 2
2 - EET 1930 Jun 21
3 - MSK 1941 S 20
1 c CE%sT 1943 N 6
3 R MSK/MSD 1990 Jul 1 2
2 1 EEST 1991 S 29 3
2 e EE%sT 1995
2 E EE%sT
Z Europe/Uzhgorod 1:29:12 - LMT 1890 O
1 - CET 1940
1 c CE%sT 1944 O
1 1 CEST 1944 O 26
1 - CET 1945 Jun 29
3 R MSK/MSD 1990
3 - MSK 1990 Jul 1 2
1 - CET 1991 Mar 31 3
2 - EET 1992
2 e EE%sT 1995
2 E EE%sT
Z Europe/Zaporozhye 2:20:40 - LMT 1880
2:20 - +0220 1924 May 2
2 - EET 1930 Jun 21
3 - MSK 1941 Au 25
1 c CE%sT 1943 O 25
3 R MSK/MSD 1991 Mar 31 2
2 e EE%sT 1995
2 E EE%sT
R u 1918 1919 - Mar lastSu 2 1 D
R u 1918 1919 - O lastSu 2 0 S
R u 1942 o - F 9 2 1 W
R u 1945 o - Au 14 23u 1 P
R u 1945 o - S 30 2 0 S
R u 1967 2006 - O lastSu 2 0 S
R u 1967 1973 - Ap lastSu 2 1 D
R u 1974 o - Ja 6 2 1 D
R u 1975 o - F lastSu 2 1 D
R u 1976 1986 - Ap lastSu 2 1 D
R u 1987 2006 - Ap Su>=1 2 1 D
R u 2007 ma - Mar Su>=8 2 1 D
R u 2007 ma - N Su>=1 2 0 S
Z EST -5 - EST
Z MST -7 - MST
Z HST -10 - HST
Z EST5EDT -5 u E%sT
Z CST6CDT -6 u C%sT
Z MST7MDT -7 u M%sT
Z PST8PDT -8 u P%sT
R NY 1920 o - Mar lastSu 2 1 D
R NY 1920 o - O lastSu 2 0 S
R NY 1921 1966 - Ap lastSu 2 1 D
R NY 1921 1954 - S lastSu 2 0 S
R NY 1955 1966 - O lastSu 2 0 S
Z America/New_York -4:56:2 - LMT 1883 N 18 12:3:58
-5 u E%sT 1920
-5 NY E%sT 1942
-5 u E%sT 1946
-5 NY E%sT 1967
-5 u E%sT
R Ch 1920 o - Jun 13 2 1 D
R Ch 1920 1921 - O lastSu 2 0 S
R Ch 1921 o - Mar lastSu 2 1 D
R Ch 1922 1966 - Ap lastSu 2 1 D
R Ch 1922 1954 - S lastSu 2 0 S
R Ch 1955 1966 - O lastSu 2 0 S
Z America/Chicago -5:50:36 - LMT 1883 N 18 12:9:24
-6 u C%sT 1920
-6 Ch C%sT 1936 Mar 1 2
-5 - EST 1936 N 15 2
-6 Ch C%sT 1942
-6 u C%sT 1946
-6 Ch C%sT 1967
-6 u C%sT
Z America/North_Dakota/Center -6:45:12 - LMT 1883 N 18 12:14:48
-7 u M%sT 1992 O 25 2
-6 u C%sT
Z America/North_Dakota/New_Salem -6:45:39 - LMT 1883 N 18 12:14:21
-7 u M%sT 2003 O 26 2
-6 u C%sT
Z America/North_Dakota/Beulah -6:47:7 - LMT 1883 N 18 12:12:53
-7 u M%sT 2010 N 7 2
-6 u C%sT
R De 1920 1921 - Mar lastSu 2 1 D
R De 1920 o - O lastSu 2 0 S
R De 1921 o - May 22 2 0 S
R De 1965 1966 - Ap lastSu 2 1 D
R De 1965 1966 - O lastSu 2 0 S
Z America/Denver -6:59:56 - LMT 1883 N 18 12:0:4
-7 u M%sT 1920
-7 De M%sT 1942
-7 u M%sT 1946
-7 De M%sT 1967
-7 u M%sT
R CA 1948 o - Mar 14 2:1 1 D
R CA 1949 o - Ja 1 2 0 S
R CA 1950 1966 - Ap lastSu 1 1 D
R CA 1950 1961 - S lastSu 2 0 S
R CA 1962 1966 - O lastSu 2 0 S
Z America/Los_Angeles -7:52:58 - LMT 1883 N 18 12:7:2
-8 u P%sT 1946
-8 CA P%sT 1967
-8 u P%sT
Z America/Juneau 15:2:19 - LMT 1867 O 19 15:33:32
-8:57:41 - LMT 1900 Au 20 12
-8 - PST 1942
-8 u P%sT 1946
-8 - PST 1969
-8 u P%sT 1980 Ap 27 2
-9 u Y%sT 1980 O 26 2
-8 u P%sT 1983 O 30 2
-9 u Y%sT 1983 N 30
-9 u AK%sT
Z America/Sitka 14:58:47 - LMT 1867 O 19 15:30
-9:1:13 - LMT 1900 Au 20 12
-8 - PST 1942
-8 u P%sT 1946
-8 - PST 1969
-8 u P%sT 1983 O 30 2
-9 u Y%sT 1983 N 30
-9 u AK%sT
Z America/Metlakatla 15:13:42 - LMT 1867 O 19 15:44:55
-8:46:18 - LMT 1900 Au 20 12
-8 - PST 1942
-8 u P%sT 1946
-8 - PST 1969
-8 u P%sT 1983 O 30 2
-8 - PST 2015 N 1 2
-9 u AK%sT 2018 N 4 2
-8 - PST 2019 Ja 20 2
-9 u AK%sT
Z America/Yakutat 14:41:5 - LMT 1867 O 19 15:12:18
-9:18:55 - LMT 1900 Au 20 12
-9 - YST 1942
-9 u Y%sT 1946
-9 - YST 1969
-9 u Y%sT 1983 N 30
-9 u AK%sT
Z America/Anchorage 14:0:24 - LMT 1867 O 19 14:31:37
-9:59:36 - LMT 1900 Au 20 12
-10 - AST 1942
-10 u A%sT 1967 Ap
-10 - AHST 1969
-10 u AH%sT 1983 O 30 2
-9 u Y%sT 1983 N 30
-9 u AK%sT
Z America/Nome 12:58:22 - LMT 1867 O 19 13:29:35
-11:1:38 - LMT 1900 Au 20 12
-11 - NST 1942
-11 u N%sT 1946
-11 - NST 1967 Ap
-11 - BST 1969
-11 u B%sT 1983 O 30 2
-9 u Y%sT 1983 N 30
-9 u AK%sT
Z America/Adak 12:13:22 - LMT 1867 O 19 12:44:35
-11:46:38 - LMT 1900 Au 20 12
-11 - NST 1942
-11 u N%sT 1946
-11 - NST 1967 Ap
-11 - BST 1969
-11 u B%sT 1983 O 30 2
-10 u AH%sT 1983 N 30
-10 u H%sT
Z Pacific/Honolulu -10:31:26 - LMT 1896 Ja 13 12
-10:30 - HST 1933 Ap 30 2
-10:30 1 HDT 1933 May 21 12
-10:30 u H%sT 1947 Jun 8 2
-10 - HST
Z America/Phoenix -7:28:18 - LMT 1883 N 18 11:31:42
-7 u M%sT 1944 Ja 1 0:1
-7 - MST 1944 Ap 1 0:1
-7 u M%sT 1944 O 1 0:1
-7 - MST 1967
-7 u M%sT 1968 Mar 21
-7 - MST
Z America/Boise -7:44:49 - LMT 1883 N 18 12:15:11
-8 u P%sT 1923 May 13 2
-7 u M%sT 1974
-7 - MST 1974 F 3 2
-7 u M%sT
R In 1941 o - Jun 22 2 1 D
R In 1941 1954 - S lastSu 2 0 S
R In 1946 1954 - Ap lastSu 2 1 D
Z America/Indiana/Indianapolis -5:44:38 - LMT 1883 N 18 12:15:22
-6 u C%sT 1920
-6 In C%sT 1942
-6 u C%sT 1946
-6 In C%sT 1955 Ap 24 2
-5 - EST 1957 S 29 2
-6 - CST 1958 Ap 27 2
-5 - EST 1969
-5 u E%sT 1971
-5 - EST 2006
-5 u E%sT
R Ma 1951 o - Ap lastSu 2 1 D
R Ma 1951 o - S lastSu 2 0 S
R Ma 1954 1960 - Ap lastSu 2 1 D
R Ma 1954 1960 - S lastSu 2 0 S
Z America/Indiana/Marengo -5:45:23 - LMT 1883 N 18 12:14:37
-6 u C%sT 1951
-6 Ma C%sT 1961 Ap 30 2
-5 - EST 1969
-5 u E%sT 1974 Ja 6 2
-6 1 CDT 1974 O 27 2
-5 u E%sT 1976
-5 - EST 2006
-5 u E%sT
R V 1946 o - Ap lastSu 2 1 D
R V 1946 o - S lastSu 2 0 S
R V 1953 1954 - Ap lastSu 2 1 D
R V 1953 1959 - S lastSu 2 0 S
R V 1955 o - May 1 0 1 D
R V 1956 1963 - Ap lastSu 2 1 D
R V 1960 o - O lastSu 2 0 S
R V 1961 o - S lastSu 2 0 S
R V 1962 1963 - O lastSu 2 0 S
Z America/Indiana/Vincennes -5:50:7 - LMT 1883 N 18 12:9:53
-6 u C%sT 1946
-6 V C%sT 1964 Ap 26 2
-5 - EST 1969
-5 u E%sT 1971
-5 - EST 2006 Ap 2 2
-6 u C%sT 2007 N 4 2
-5 u E%sT
R Pe 1955 o - May 1 0 1 D
R Pe 1955 1960 - S lastSu 2 0 S
R Pe 1956 1963 - Ap lastSu 2 1 D
R Pe 1961 1963 - O lastSu 2 0 S
Z America/Indiana/Tell_City -5:47:3 - LMT 1883 N 18 12:12:57
-6 u C%sT 1946
-6 Pe C%sT 1964 Ap 26 2
-5 - EST 1967 O 29 2
-6 u C%sT 1969 Ap 27 2
-5 u E%sT 1971
-5 - EST 2006 Ap 2 2
-6 u C%sT
R Pi 1955 o - May 1 0 1 D
R Pi 1955 1960 - S lastSu 2 0 S
R Pi 1956 1964 - Ap lastSu 2 1 D
R Pi 1961 1964 - O lastSu 2 0 S
Z America/Indiana/Petersburg -5:49:7 - LMT 1883 N 18 12:10:53
-6 u C%sT 1955
-6 Pi C%sT 1965 Ap 25 2
-5 - EST 1966 O 30 2
-6 u C%sT 1977 O 30 2
-5 - EST 2006 Ap 2 2
-6 u C%sT 2007 N 4 2
-5 u E%sT
R St 1947 1961 - Ap lastSu 2 1 D
R St 1947 1954 - S lastSu 2 0 S
R St 1955 1956 - O lastSu 2 0 S
R St 1957 1958 - S lastSu 2 0 S
R St 1959 1961 - O lastSu 2 0 S
Z America/Indiana/Knox -5:46:30 - LMT 1883 N 18 12:13:30
-6 u C%sT 1947
-6 St C%sT 1962 Ap 29 2
-5 - EST 1963 O 27 2
-6 u C%sT 1991 O 27 2
-5 - EST 2006 Ap 2 2
-6 u C%sT
R Pu 1946 1960 - Ap lastSu 2 1 D
R Pu 1946 1954 - S lastSu 2 0 S
R Pu 1955 1956 - O lastSu 2 0 S
R Pu 1957 1960 - S lastSu 2 0 S
Z America/Indiana/Winamac -5:46:25 - LMT 1883 N 18 12:13:35
-6 u C%sT 1946
-6 Pu C%sT 1961 Ap 30 2
-5 - EST 1969
-5 u E%sT 1971
-5 - EST 2006 Ap 2 2
-6 u C%sT 2007 Mar 11 2
-5 u E%sT
Z America/Indiana/Vevay -5:40:16 - LMT 1883 N 18 12:19:44
-6 u C%sT 1954 Ap 25 2
-5 - EST 1969
-5 u E%sT 1973
-5 - EST 2006
-5 u E%sT
R v 1921 o - May 1 2 1 D
R v 1921 o - S 1 2 0 S
R v 1941 o - Ap lastSu 2 1 D
R v 1941 o - S lastSu 2 0 S
R v 1946 o - Ap lastSu 0:1 1 D
R v 1946 o - Jun 2 2 0 S
R v 1950 1961 - Ap lastSu 2 1 D
R v 1950 1955 - S lastSu 2 0 S
R v 1956 1961 - O lastSu 2 0 S
Z America/Kentucky/Louisville -5:43:2 - LMT 1883 N 18 12:16:58
-6 u C%sT 1921
-6 v C%sT 1942
-6 u C%sT 1946
-6 v C%sT 1961 Jul 23 2
-5 - EST 1968
-5 u E%sT 1974 Ja 6 2
-6 1 CDT 1974 O 27 2
-5 u E%sT
Z America/Kentucky/Monticello -5:39:24 - LMT 1883 N 18 12:20:36
-6 u C%sT 1946
-6 - CST 1968
-6 u C%sT 2000 O 29 2
-5 u E%sT
R Dt 1948 o - Ap lastSu 2 1 D
R Dt 1948 o - S lastSu 2 0 S
Z America/Detroit -5:32:11 - LMT 1905
-6 - CST 1915 May 15 2
-5 - EST 1942
-5 u E%sT 1946
-5 Dt E%sT 1967 Jun 14 0:1
-5 u E%sT 1969
-5 - EST 1973
-5 u E%sT 1975
-5 - EST 1975 Ap 27 2
-5 u E%sT
R Me 1946 o - Ap lastSu 2 1 D
R Me 1946 o - S lastSu 2 0 S
R Me 1966 o - Ap lastSu 2 1 D
R Me 1966 o - O lastSu 2 0 S
Z America/Menominee -5:50:27 - LMT 1885 S 18 12
-6 u C%sT 1946
-6 Me C%sT 1969 Ap 27 2
-5 - EST 1973 Ap 29 2
-6 u C%sT
R C 1918 o - Ap 14 2 1 D
R C 1918 o - O 27 2 0 S
R C 1942 o - F 9 2 1 W
R C 1945 o - Au 14 23u 1 P
R C 1945 o - S 30 2 0 S
R C 1974 1986 - Ap lastSu 2 1 D
R C 1974 2006 - O lastSu 2 0 S
R C 1987 2006 - Ap Su>=1 2 1 D
R C 2007 ma - Mar Su>=8 2 1 D
R C 2007 ma - N Su>=1 2 0 S
R j 1917 o - Ap 8 2 1 D
R j 1917 o - S 17 2 0 S
R j 1919 o - May 5 23 1 D
R j 1919 o - Au 12 23 0 S
R j 1920 1935 - May Su>=1 23 1 D
R j 1920 1935 - O lastSu 23 0 S
R j 1936 1941 - May M>=9 0 1 D
R j 1936 1941 - O M>=2 0 0 S
R j 1946 1950 - May Su>=8 2 1 D
R j 1946 1950 - O Su>=2 2 0 S
R j 1951 1986 - Ap lastSu 2 1 D
R j 1951 1959 - S lastSu 2 0 S
R j 1960 1986 - O lastSu 2 0 S
R j 1987 o - Ap Su>=1 0:1 1 D
R j 1987 2006 - O lastSu 0:1 0 S
R j 1988 o - Ap Su>=1 0:1 2 DD
R j 1989 2006 - Ap Su>=1 0:1 1 D
R j 2007 2011 - Mar Su>=8 0:1 1 D
R j 2007 2010 - N Su>=1 0:1 0 S
Z America/St_Johns -3:30:52 - LMT 1884
-3:30:52 j N%sT 1918
-3:30:52 C N%sT 1919
-3:30:52 j N%sT 1935 Mar 30
-3:30 j N%sT 1942 May 11
-3:30 C N%sT 1946
-3:30 j N%sT 2011 N
-3:30 C N%sT
Z America/Goose_Bay -4:1:40 - LMT 1884
-3:30:52 - NST 1918
-3:30:52 C N%sT 1919
-3:30:52 - NST 1935 Mar 30
-3:30 - NST 1936
-3:30 j N%sT 1942 May 11
-3:30 C N%sT 1946
-3:30 j N%sT 1966 Mar 15 2
-4 j A%sT 2011 N
-4 C A%sT
R H 1916 o - Ap 1 0 1 D
R H 1916 o - O 1 0 0 S
R H 1920 o - May 9 0 1 D
R H 1920 o - Au 29 0 0 S
R H 1921 o - May 6 0 1 D
R H 1921 1922 - S 5 0 0 S
R H 1922 o - Ap 30 0 1 D
R H 1923 1925 - May Su>=1 0 1 D
R H 1923 o - S 4 0 0 S
R H 1924 o - S 15 0 0 S
R H 1925 o - S 28 0 0 S
R H 1926 o - May 16 0 1 D
R H 1926 o - S 13 0 0 S
R H 1927 o - May 1 0 1 D
R H 1927 o - S 26 0 0 S
R H 1928 1931 - May Su>=8 0 1 D
R H 1928 o - S 9 0 0 S
R H 1929 o - S 3 0 0 S
R H 1930 o - S 15 0 0 S
R H 1931 1932 - S M>=24 0 0 S
R H 1932 o - May 1 0 1 D
R H 1933 o - Ap 30 0 1 D
R H 1933 o - O 2 0 0 S
R H 1934 o - May 20 0 1 D
R H 1934 o - S 16 0 0 S
R H 1935 o - Jun 2 0 1 D
R H 1935 o - S 30 0 0 S
R H 1936 o - Jun 1 0 1 D
R H 1936 o - S 14 0 0 S
R H 1937 1938 - May Su>=1 0 1 D
R H 1937 1941 - S M>=24 0 0 S
R H 1939 o - May 28 0 1 D
R H 1940 1941 - May Su>=1 0 1 D
R H 1946 1949 - Ap lastSu 2 1 D
R H 1946 1949 - S lastSu 2 0 S
R H 1951 1954 - Ap lastSu 2 1 D
R H 1951 1954 - S lastSu 2 0 S
R H 1956 1959 - Ap lastSu 2 1 D
R H 1956 1959 - S lastSu 2 0 S
R H 1962 1973 - Ap lastSu 2 1 D
R H 1962 1973 - O lastSu 2 0 S
Z America/Halifax -4:14:24 - LMT 1902 Jun 15
-4 H A%sT 1918
-4 C A%sT 1919
-4 H A%sT 1942 F 9 2s
-4 C A%sT 1946
-4 H A%sT 1974
-4 C A%sT
Z America/Glace_Bay -3:59:48 - LMT 1902 Jun 15
-4 C A%sT 1953
-4 H A%sT 1954
-4 - AST 1972
-4 H A%sT 1974
-4 C A%sT
R o 1933 1935 - Jun Su>=8 1 1 D
R o 1933 1935 - S Su>=8 1 0 S
R o 1936 1938 - Jun Su>=1 1 1 D
R o 1936 1938 - S Su>=1 1 0 S
R o 1939 o - May 27 1 1 D
R o 1939 1941 - S Sa>=21 1 0 S
R o 1940 o - May 19 1 1 D
R o 1941 o - May 4 1 1 D
R o 1946 1972 - Ap lastSu 2 1 D
R o 1946 1956 - S lastSu 2 0 S
R o 1957 1972 - O lastSu 2 0 S
R o 1993 2006 - Ap Su>=1 0:1 1 D
R o 1993 2006 - O lastSu 0:1 0 S
Z America/Moncton -4:19:8 - LMT 1883 D 9
-5 - EST 1902 Jun 15
-4 C A%sT 1933
-4 o A%sT 1942
-4 C A%sT 1946
-4 o A%sT 1973
-4 C A%sT 1993
-4 o A%sT 2007
-4 C A%sT
Z America/Blanc-Sablon -3:48:28 - LMT 1884
-4 C A%sT 1970
-4 - AST
R t 1919 o - Mar 30 23:30 1 D
R t 1919 o - O 26 0 0 S
R t 1920 o - May 2 2 1 D
R t 1920 o - S 26 0 0 S
R t 1921 o - May 15 2 1 D
R t 1921 o - S 15 2 0 S
R t 1922 1923 - May Su>=8 2 1 D
R t 1922 1926 - S Su>=15 2 0 S
R t 1924 1927 - May Su>=1 2 1 D
R t 1927 1937 - S Su>=25 2 0 S
R t 1928 1937 - Ap Su>=25 2 1 D
R t 1938 1940 - Ap lastSu 2 1 D
R t 1938 1939 - S lastSu 2 0 S
R t 1945 1946 - S lastSu 2 0 S
R t 1946 o - Ap lastSu 2 1 D
R t 1947 1949 - Ap lastSu 0 1 D
R t 1947 1948 - S lastSu 0 0 S
R t 1949 o - N lastSu 0 0 S
R t 1950 1973 - Ap lastSu 2 1 D
R t 1950 o - N lastSu 2 0 S
R t 1951 1956 - S lastSu 2 0 S
R t 1957 1973 - O lastSu 2 0 S
Z America/Toronto -5:17:32 - LMT 1895
-5 C E%sT 1919
-5 t E%sT 1942 F 9 2s
-5 C E%sT 1946
-5 t E%sT 1974
-5 C E%sT
Z America/Thunder_Bay -5:57 - LMT 1895
-6 - CST 1910
-5 - EST 1942
-5 C E%sT 1970
-5 t E%sT 1973
-5 - EST 1974
-5 C E%sT
Z America/Nipigon -5:53:4 - LMT 1895
-5 C E%sT 1940 S 29
-5 1 EDT 1942 F 9 2s
-5 C E%sT
Z America/Rainy_River -6:18:16 - LMT 1895
-6 C C%sT 1940 S 29
-6 1 CDT 1942 F 9 2s
-6 C C%sT
Z America/Atikokan -6:6:28 - LMT 1895
-6 C C%sT 1940 S 29
-6 1 CDT 1942 F 9 2s
-6 C C%sT 1945 S 30 2
-5 - EST
R W 1916 o - Ap 23 0 1 D
R W 1916 o - S 17 0 0 S
R W 1918 o - Ap 14 2 1 D
R W 1918 o - O 27 2 0 S
R W 1937 o - May 16 2 1 D
R W 1937 o - S 26 2 0 S
R W 1942 o - F 9 2 1 W
R W 1945 o - Au 14 23u 1 P
R W 1945 o - S lastSu 2 0 S
R W 1946 o - May 12 2 1 D
R W 1946 o - O 13 2 0 S
R W 1947 1949 - Ap lastSu 2 1 D
R W 1947 1949 - S lastSu 2 0 S
R W 1950 o - May 1 2 1 D
R W 1950 o - S 30 2 0 S
R W 1951 1960 - Ap lastSu 2 1 D
R W 1951 1958 - S lastSu 2 0 S
R W 1959 o - O lastSu 2 0 S
R W 1960 o - S lastSu 2 0 S
R W 1963 o - Ap lastSu 2 1 D
R W 1963 o - S 22 2 0 S
R W 1966 1986 - Ap lastSu 2s 1 D
R W 1966 2005 - O lastSu 2s 0 S
R W 1987 2005 - Ap Su>=1 2s 1 D
Z America/Winnipeg -6:28:36 - LMT 1887 Jul 16
-6 W C%sT 2006
-6 C C%sT
R r 1918 o - Ap 14 2 1 D
R r 1918 o - O 27 2 0 S
R r 1930 1934 - May Su>=1 0 1 D
R r 1930 1934 - O Su>=1 0 0 S
R r 1937 1941 - Ap Su>=8 0 1 D
R r 1937 o - O Su>=8 0 0 S
R r 1938 o - O Su>=1 0 0 S
R r 1939 1941 - O Su>=8 0 0 S
R r 1942 o - F 9 2 1 W
R r 1945 o - Au 14 23u 1 P
R r 1945 o - S lastSu 2 0 S
R r 1946 o - Ap Su>=8 2 1 D
R r 1946 o - O Su>=8 2 0 S
R r 1947 1957 - Ap lastSu 2 1 D
R r 1947 1957 - S lastSu 2 0 S
R r 1959 o - Ap lastSu 2 1 D
R r 1959 o - O lastSu 2 0 S
R Sw 1957 o - Ap lastSu 2 1 D
R Sw 1957 o - O lastSu 2 0 S
R Sw 1959 1961 - Ap lastSu 2 1 D
R Sw 1959 o - O lastSu 2 0 S
R Sw 1960 1961 - S lastSu 2 0 S
Z America/Regina -6:58:36 - LMT 1905 S
-7 r M%sT 1960 Ap lastSu 2
-6 - CST
Z America/Swift_Current -7:11:20 - LMT 1905 S
-7 C M%sT 1946 Ap lastSu 2
-7 r M%sT 1950
-7 Sw M%sT 1972 Ap lastSu 2
-6 - CST
R Ed 1918 1919 - Ap Su>=8 2 1 D
R Ed 1918 o - O 27 2 0 S
R Ed 1919 o - May 27 2 0 S
R Ed 1920 1923 - Ap lastSu 2 1 D
R Ed 1920 o - O lastSu 2 0 S
R Ed 1921 1923 - S lastSu 2 0 S
R Ed 1942 o - F 9 2 1 W
R Ed 1945 o - Au 14 23u 1 P
R Ed 1945 o - S lastSu 2 0 S
R Ed 1947 o - Ap lastSu 2 1 D
R Ed 1947 o - S lastSu 2 0 S
R Ed 1972 1986 - Ap lastSu 2 1 D
R Ed 1972 2006 - O lastSu 2 0 S
Z America/Edmonton -7:33:52 - LMT 1906 S
-7 Ed M%sT 1987
-7 C M%sT
R Va 1918 o - Ap 14 2 1 D
R Va 1918 o - O 27 2 0 S
R Va 1942 o - F 9 2 1 W
R Va 1945 o - Au 14 23u 1 P
R Va 1945 o - S 30 2 0 S
R Va 1946 1986 - Ap lastSu 2 1 D
R Va 1946 o - S 29 2 0 S
R Va 1947 1961 - S lastSu 2 0 S
R Va 1962 2006 - O lastSu 2 0 S
Z America/Vancouver -8:12:28 - LMT 1884
-8 Va P%sT 1987
-8 C P%sT
Z America/Dawson_Creek -8:0:56 - LMT 1884
-8 C P%sT 1947
-8 Va P%sT 1972 Au 30 2
-7 - MST
Z America/Fort_Nelson -8:10:47 - LMT 1884
-8 Va P%sT 1946
-8 - PST 1947
-8 Va P%sT 1987
-8 C P%sT 2015 Mar 8 2
-7 - MST
Z America/Creston -7:46:4 - LMT 1884
-7 - MST 1916 O
-8 - PST 1918 Jun 2
-7 - MST
R Y 1918 o - Ap 14 2 1 D
R Y 1918 o - O 27 2 0 S
R Y 1919 o - May 25 2 1 D
R Y 1919 o - N 1 0 0 S
R Y 1942 o - F 9 2 1 W
R Y 1945 o - Au 14 23u 1 P
R Y 1945 o - S 30 2 0 S
R Y 1965 o - Ap lastSu 0 2 DD
R Y 1965 o - O lastSu 2 0 S
R Y 1980 1986 - Ap lastSu 2 1 D
R Y 1980 2006 - O lastSu 2 0 S
R Y 1987 2006 - Ap Su>=1 2 1 D
Z America/Pangnirtung 0 - -00 1921
-4 Y A%sT 1995 Ap Su>=1 2
-5 C E%sT 1999 O 31 2
-6 C C%sT 2000 O 29 2
-5 C E%sT
Z America/Iqaluit 0 - -00 1942 Au
-5 Y E%sT 1999 O 31 2
-6 C C%sT 2000 O 29 2
-5 C E%sT
Z America/Resolute 0 - -00 1947 Au 31
-6 Y C%sT 2000 O 29 2
-5 - EST 2001 Ap 1 3
-6 C C%sT 2006 O 29 2
-5 - EST 2007 Mar 11 3
-6 C C%sT
Z America/Rankin_Inlet 0 - -00 1957
-6 Y C%sT 2000 O 29 2
-5 - EST 2001 Ap 1 3
-6 C C%sT
Z America/Cambridge_Bay 0 - -00 1920
-7 Y M%sT 1999 O 31 2
-6 C C%sT 2000 O 29 2
-5 - EST 2000 N 5
-6 - CST 2001 Ap 1 3
-7 C M%sT
Z America/Yellowknife 0 - -00 1935
-7 Y M%sT 1980
-7 C M%sT
Z America/Inuvik 0 - -00 1953
-8 Y P%sT 1979 Ap lastSu 2
-7 Y M%sT 1980
-7 C M%sT
Z America/Whitehorse -9:0:12 - LMT 1900 Au 20
-9 Y Y%sT 1967 May 28
-8 Y P%sT 1980
-8 C P%sT 2020 Mar 8 2
-7 - MST
Z America/Dawson -9:17:40 - LMT 1900 Au 20
-9 Y Y%sT 1973 O 28
-8 Y P%sT 1980
-8 C P%sT 2020 Mar 8 2
-7 - MST
R m 1939 o - F 5 0 1 D
R m 1939 o - Jun 25 0 0 S
R m 1940 o - D 9 0 1 D
R m 1941 o - Ap 1 0 0 S
R m 1943 o - D 16 0 1 W
R m 1944 o - May 1 0 0 S
R m 1950 o - F 12 0 1 D
R m 1950 o - Jul 30 0 0 S
R m 1996 2000 - Ap Su>=1 2 1 D
R m 1996 2000 - O lastSu 2 0 S
R m 2001 o - May Su>=1 2 1 D
R m 2001 o - S lastSu 2 0 S
R m 2002 ma - Ap Su>=1 2 1 D
R m 2002 ma - O lastSu 2 0 S
Z America/Cancun -5:47:4 - LMT 1922 Ja 1 0:12:56
-6 - CST 1981 D 23
-5 m E%sT 1998 Au 2 2
-6 m C%sT 2015 F 1 2
-5 - EST
Z America/Merida -5:58:28 - LMT 1922 Ja 1 0:1:32
-6 - CST 1981 D 23
-5 - EST 1982 D 2
-6 m C%sT
Z America/Matamoros -6:40 - LMT 1921 D 31 23:20
-6 - CST 1988
-6 u C%sT 1989
-6 m C%sT 2010
-6 u C%sT
Z America/Monterrey -6:41:16 - LMT 1921 D 31 23:18:44
-6 - CST 1988
-6 u C%sT 1989
-6 m C%sT
Z America/Mexico_City -6:36:36 - LMT 1922 Ja 1 0:23:24
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 m C%sT 2001 S 30 2
-6 - CST 2002 F 20
-6 m C%sT
Z America/Ojinaga -6:57:40 - LMT 1922 Ja 1 0:2:20
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 - CST 1996
-6 m C%sT 1998
-6 - CST 1998 Ap Su>=1 3
-7 m M%sT 2010
-7 u M%sT
Z America/Chihuahua -7:4:20 - LMT 1921 D 31 23:55:40
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 - CST 1996
-6 m C%sT 1998
-6 - CST 1998 Ap Su>=1 3
-7 m M%sT
Z America/Hermosillo -7:23:52 - LMT 1921 D 31 23:36:8
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 - CST 1942 Ap 24
-7 - MST 1949 Ja 14
-8 - PST 1970
-7 m M%sT 1999
-7 - MST
Z America/Mazatlan -7:5:40 - LMT 1921 D 31 23:54:20
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 - CST 1942 Ap 24
-7 - MST 1949 Ja 14
-8 - PST 1970
-7 m M%sT
Z America/Bahia_Banderas -7:1 - LMT 1921 D 31 23:59
-7 - MST 1927 Jun 10 23
-6 - CST 1930 N 15
-7 - MST 1931 May 1 23
-6 - CST 1931 O
-7 - MST 1932 Ap
-6 - CST 1942 Ap 24
-7 - MST 1949 Ja 14
-8 - PST 1970
-7 m M%sT 2010 Ap 4 2
-6 m C%sT
Z America/Tijuana -7:48:4 - LMT 1922 Ja 1 0:11:56
-7 - MST 1924
-8 - PST 1927 Jun 10 23
-7 - MST 1930 N 15
-8 - PST 1931 Ap
-8 1 PDT 1931 S 30
-8 - PST 1942 Ap 24
-8 1 PWT 1945 Au 14 23u
-8 1 PPT 1945 N 12
-8 - PST 1948 Ap 5
-8 1 PDT 1949 Ja 14
-8 - PST 1954
-8 CA P%sT 1961
-8 - PST 1976
-8 u P%sT 1996
-8 m P%sT 2001
-8 u P%sT 2002 F 20
-8 m P%sT 2010
-8 u P%sT
R BS 1964 1975 - O lastSu 2 0 S
R BS 1964 1975 - Ap lastSu 2 1 D
Z America/Nassau -5:9:30 - LMT 1912 Mar 2
-5 BS E%sT 1976
-5 u E%sT
R BB 1977 o - Jun 12 2 1 D
R BB 1977 1978 - O Su>=1 2 0 S
R BB 1978 1980 - Ap Su>=15 2 1 D
R BB 1979 o - S 30 2 0 S
R BB 1980 o - S 25 2 0 S
Z America/Barbados -3:58:29 - LMT 1924
-3:58:29 - BMT 1932
-4 BB A%sT
R BZ 1918 1942 - O Su>=2 0 0:30 -0530
R BZ 1919 1943 - F Su>=9 0 0 CST
R BZ 1973 o - D 5 0 1 CDT
R BZ 1974 o - F 9 0 0 CST
R BZ 1982 o - D 18 0 1 CDT
R BZ 1983 o - F 12 0 0 CST
Z America/Belize -5:52:48 - LMT 1912 Ap
-6 BZ %s
Z Atlantic/Bermuda -4:19:18 - LMT 1930 Ja 1 2
-4 - AST 1974 Ap 28 2
-4 C A%sT 1976
-4 u A%sT
R CR 1979 1980 - F lastSu 0 1 D
R CR 1979 1980 - Jun Su>=1 0 0 S
R CR 1991 1992 - Ja Sa>=15 0 1 D
R CR 1991 o - Jul 1 0 0 S
R CR 1992 o - Mar 15 0 0 S
Z America/Costa_Rica -5:36:13 - LMT 1890
-5:36:13 - SJMT 1921 Ja 15
-6 CR C%sT
R Q 1928 o - Jun 10 0 1 D
R Q 1928 o - O 10 0 0 S
R Q 1940 1942 - Jun Su>=1 0 1 D
R Q 1940 1942 - S Su>=1 0 0 S
R Q 1945 1946 - Jun Su>=1 0 1 D
R Q 1945 1946 - S Su>=1 0 0 S
R Q 1965 o - Jun 1 0 1 D
R Q 1965 o - S 30 0 0 S
R Q 1966 o - May 29 0 1 D
R Q 1966 o - O 2 0 0 S
R Q 1967 o - Ap 8 0 1 D
R Q 1967 1968 - S Su>=8 0 0 S
R Q 1968 o - Ap 14 0 1 D
R Q 1969 1977 - Ap lastSu 0 1 D
R Q 1969 1971 - O lastSu 0 0 S
R Q 1972 1974 - O 8 0 0 S
R Q 1975 1977 - O lastSu 0 0 S
R Q 1978 o - May 7 0 1 D
R Q 1978 1990 - O Su>=8 0 0 S
R Q 1979 1980 - Mar Su>=15 0 1 D
R Q 1981 1985 - May Su>=5 0 1 D
R Q 1986 1989 - Mar Su>=14 0 1 D
R Q 1990 1997 - Ap Su>=1 0 1 D
R Q 1991 1995 - O Su>=8 0s 0 S
R Q 1996 o - O 6 0s 0 S
R Q 1997 o - O 12 0s 0 S
R Q 1998 1999 - Mar lastSu 0s 1 D
R Q 1998 2003 - O lastSu 0s 0 S
R Q 2000 2003 - Ap Su>=1 0s 1 D
R Q 2004 o - Mar lastSu 0s 1 D
R Q 2006 2010 - O lastSu 0s 0 S
R Q 2007 o - Mar Su>=8 0s 1 D
R Q 2008 o - Mar Su>=15 0s 1 D
R Q 2009 2010 - Mar Su>=8 0s 1 D
R Q 2011 o - Mar Su>=15 0s 1 D
R Q 2011 o - N 13 0s 0 S
R Q 2012 o - Ap 1 0s 1 D
R Q 2012 ma - N Su>=1 0s 0 S
R Q 2013 ma - Mar Su>=8 0s 1 D
Z America/Havana -5:29:28 - LMT 1890
-5:29:36 - HMT 1925 Jul 19 12
-5 Q C%sT
R DO 1966 o - O 30 0 1 EDT
R DO 1967 o - F 28 0 0 EST
R DO 1969 1973 - O lastSu 0 0:30 -0430
R DO 1970 o - F 21 0 0 EST
R DO 1971 o - Ja 20 0 0 EST
R DO 1972 1974 - Ja 21 0 0 EST
Z America/Santo_Domingo -4:39:36 - LMT 1890
-4:40 - SDMT 1933 Ap 1 12
-5 DO %s 1974 O 27
-4 - AST 2000 O 29 2
-5 u E%sT 2000 D 3 1
-4 - AST
R SV 1987 1988 - May Su>=1 0 1 D
R SV 1987 1988 - S lastSu 0 0 S
Z America/El_Salvador -5:56:48 - LMT 1921
-6 SV C%sT
R GT 1973 o - N 25 0 1 D
R GT 1974 o - F 24 0 0 S
R GT 1983 o - May 21 0 1 D
R GT 1983 o - S 22 0 0 S
R GT 1991 o - Mar 23 0 1 D
R GT 1991 o - S 7 0 0 S
R GT 2006 o - Ap 30 0 1 D
R GT 2006 o - O 1 0 0 S
Z America/Guatemala -6:2:4 - LMT 1918 O 5
-6 GT C%sT
R HT 1983 o - May 8 0 1 D
R HT 1984 1987 - Ap lastSu 0 1 D
R HT 1983 1987 - O lastSu 0 0 S
R HT 1988 1997 - Ap Su>=1 1s 1 D
R HT 1988 1997 - O lastSu 1s 0 S
R HT 2005 2006 - Ap Su>=1 0 1 D
R HT 2005 2006 - O lastSu 0 0 S
R HT 2012 2015 - Mar Su>=8 2 1 D
R HT 2012 2015 - N Su>=1 2 0 S
R HT 2017 ma - Mar Su>=8 2 1 D
R HT 2017 ma - N Su>=1 2 0 S
Z America/Port-au-Prince -4:49:20 - LMT 1890
-4:49 - PPMT 1917 Ja 24 12
-5 HT E%sT
R HN 1987 1988 - May Su>=1 0 1 D
R HN 1987 1988 - S lastSu 0 0 S
R HN 2006 o - May Su>=1 0 1 D
R HN 2006 o - Au M>=1 0 0 S
Z America/Tegucigalpa -5:48:52 - LMT 1921 Ap
-6 HN C%sT
Z America/Jamaica -5:7:10 - LMT 1890
-5:7:10 - KMT 1912 F
-5 - EST 1974
-5 u E%sT 1984
-5 - EST
Z America/Martinique -4:4:20 - LMT 1890
-4:4:20 - FFMT 1911 May
-4 - AST 1980 Ap 6
-4 1 ADT 1980 S 28
-4 - AST
R NI 1979 1980 - Mar Su>=16 0 1 D
R NI 1979 1980 - Jun M>=23 0 0 S
R NI 2005 o - Ap 10 0 1 D
R NI 2005 o - O Su>=1 0 0 S
R NI 2006 o - Ap 30 2 1 D
R NI 2006 o - O Su>=1 1 0 S
Z America/Managua -5:45:8 - LMT 1890
-5:45:12 - MMT 1934 Jun 23
-6 - CST 1973 May
-5 - EST 1975 F 16
-6 NI C%sT 1992 Ja 1 4
-5 - EST 1992 S 24
-6 - CST 1993
-5 - EST 1997
-6 NI C%sT
Z America/Panama -5:18:8 - LMT 1890
-5:19:36 - CMT 1908 Ap 22
-5 - EST
L America/Panama America/Cayman
Z America/Puerto_Rico -4:24:25 - LMT 1899 Mar 28 12
-4 - AST 1942 May 3
-4 u A%sT 1946
-4 - AST
Z America/Miquelon -3:44:40 - LMT 1911 May 15
-4 - AST 1980 May
-3 - -03 1987
-3 C -03/-02
Z America/Grand_Turk -4:44:32 - LMT 1890
-5:7:10 - KMT 1912 F
-5 - EST 1979
-5 u E%sT 2015 N Su>=1 2
-4 - AST 2018 Mar 11 3
-5 u E%sT
R A 1930 o - D 1 0 1 -
R A 1931 o - Ap 1 0 0 -
R A 1931 o - O 15 0 1 -
R A 1932 1940 - Mar 1 0 0 -
R A 1932 1939 - N 1 0 1 -
R A 1940 o - Jul 1 0 1 -
R A 1941 o - Jun 15 0 0 -
R A 1941 o - O 15 0 1 -
R A 1943 o - Au 1 0 0 -
R A 1943 o - O 15 0 1 -
R A 1946 o - Mar 1 0 0 -
R A 1946 o - O 1 0 1 -
R A 1963 o - O 1 0 0 -
R A 1963 o - D 15 0 1 -
R A 1964 1966 - Mar 1 0 0 -
R A 1964 1966 - O 15 0 1 -
R A 1967 o - Ap 2 0 0 -
R A 1967 1968 - O Su>=1 0 1 -
R A 1968 1969 - Ap Su>=1 0 0 -
R A 1974 o - Ja 23 0 1 -
R A 1974 o - May 1 0 0 -
R A 1988 o - D 1 0 1 -
R A 1989 1993 - Mar Su>=1 0 0 -
R A 1989 1992 - O Su>=15 0 1 -
R A 1999 o - O Su>=1 0 1 -
R A 2000 o - Mar 3 0 0 -
R A 2007 o - D 30 0 1 -
R A 2008 2009 - Mar Su>=15 0 0 -
R A 2008 o - O Su>=15 0 1 -
Z America/Argentina/Buenos_Aires -3:53:48 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 A -03/-02
Z America/Argentina/Cordoba -4:16:48 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar 3
-4 - -04 1991 O 20
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 A -03/-02
Z America/Argentina/Salta -4:21:40 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar 3
-4 - -04 1991 O 20
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/Tucuman -4:20:52 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar 3
-4 - -04 1991 O 20
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 Jun
-4 - -04 2004 Jun 13
-3 A -03/-02
Z America/Argentina/La_Rioja -4:27:24 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar
-4 - -04 1991 May 7
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 Jun
-4 - -04 2004 Jun 20
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/San_Juan -4:34:4 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar
-4 - -04 1991 May 7
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 May 31
-4 - -04 2004 Jul 25
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/Jujuy -4:21:12 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1990 Mar 4
-4 - -04 1990 O 28
-4 1 -03 1991 Mar 17
-4 - -04 1991 O 6
-3 1 -02 1992
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/Catamarca -4:23:8 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1991 Mar 3
-4 - -04 1991 O 20
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 Jun
-4 - -04 2004 Jun 20
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/Mendoza -4:35:16 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1990 Mar 4
-4 - -04 1990 O 15
-4 1 -03 1991 Mar
-4 - -04 1991 O 15
-4 1 -03 1992 Mar
-4 - -04 1992 O 18
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 May 23
-4 - -04 2004 S 26
-3 A -03/-02 2008 O 18
-3 - -03
R Sa 2008 2009 - Mar Su>=8 0 0 -
R Sa 2007 2008 - O Su>=8 0 1 -
Z America/Argentina/San_Luis -4:25:24 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1990
-3 1 -02 1990 Mar 14
-4 - -04 1990 O 15
-4 1 -03 1991 Mar
-4 - -04 1991 Jun
-3 - -03 1999 O 3
-4 1 -03 2000 Mar 3
-3 - -03 2004 May 31
-4 - -04 2004 Jul 25
-3 A -03/-02 2008 Ja 21
-4 Sa -04/-03 2009 O 11
-3 - -03
Z America/Argentina/Rio_Gallegos -4:36:52 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 Jun
-4 - -04 2004 Jun 20
-3 A -03/-02 2008 O 18
-3 - -03
Z America/Argentina/Ushuaia -4:33:12 - LMT 1894 O 31
-4:16:48 - CMT 1920 May
-4 - -04 1930 D
-4 A -04/-03 1969 O 5
-3 A -03/-02 1999 O 3
-4 A -04/-03 2000 Mar 3
-3 - -03 2004 May 30
-4 - -04 2004 Jun 20
-3 A -03/-02 2008 O 18
-3 - -03
L America/Curacao America/Aruba
Z America/La_Paz -4:32:36 - LMT 1890
-4:32:36 - CMT 1931 O 15
-4:32:36 1 BST 1932 Mar 21
-4 - -04
R B 1931 o - O 3 11 1 -
R B 1932 1933 - Ap 1 0 0 -
R B 1932 o - O 3 0 1 -
R B 1949 1952 - D 1 0 1 -
R B 1950 o - Ap 16 1 0 -
R B 1951 1952 - Ap 1 0 0 -
R B 1953 o - Mar 1 0 0 -
R B 1963 o - D 9 0 1 -
R B 1964 o - Mar 1 0 0 -
R B 1965 o - Ja 31 0 1 -
R B 1965 o - Mar 31 0 0 -
R B 1965 o - D 1 0 1 -
R B 1966 1968 - Mar 1 0 0 -
R B 1966 1967 - N 1 0 1 -
R B 1985 o - N 2 0 1 -
R B 1986 o - Mar 15 0 0 -
R B 1986 o - O 25 0 1 -
R B 1987 o - F 14 0 0 -
R B 1987 o - O 25 0 1 -
R B 1988 o - F 7 0 0 -
R B 1988 o - O 16 0 1 -
R B 1989 o - Ja 29 0 0 -
R B 1989 o - O 15 0 1 -
R B 1990 o - F 11 0 0 -
R B 1990 o - O 21 0 1 -
R B 1991 o - F 17 0 0 -
R B 1991 o - O 20 0 1 -
R B 1992 o - F 9 0 0 -
R B 1992 o - O 25 0 1 -
R B 1993 o - Ja 31 0 0 -
R B 1993 1995 - O Su>=11 0 1 -
R B 1994 1995 - F Su>=15 0 0 -
R B 1996 o - F 11 0 0 -
R B 1996 o - O 6 0 1 -
R B 1997 o - F 16 0 0 -
R B 1997 o - O 6 0 1 -
R B 1998 o - Mar 1 0 0 -
R B 1998 o - O 11 0 1 -
R B 1999 o - F 21 0 0 -
R B 1999 o - O 3 0 1 -
R B 2000 o - F 27 0 0 -
R B 2000 2001 - O Su>=8 0 1 -
R B 2001 2006 - F Su>=15 0 0 -
R B 2002 o - N 3 0 1 -
R B 2003 o - O 19 0 1 -
R B 2004 o - N 2 0 1 -
R B 2005 o - O 16 0 1 -
R B 2006 o - N 5 0 1 -
R B 2007 o - F 25 0 0 -
R B 2007 o - O Su>=8 0 1 -
R B 2008 2017 - O Su>=15 0 1 -
R B 2008 2011 - F Su>=15 0 0 -
R B 2012 o - F Su>=22 0 0 -
R B 2013 2014 - F Su>=15 0 0 -
R B 2015 o - F Su>=22 0 0 -
R B 2016 2019 - F Su>=15 0 0 -
R B 2018 o - N Su>=1 0 1 -
Z America/Noronha -2:9:40 - LMT 1914
-2 B -02/-01 1990 S 17
-2 - -02 1999 S 30
-2 B -02/-01 2000 O 15
-2 - -02 2001 S 13
-2 B -02/-01 2002 O
-2 - -02
Z America/Belem -3:13:56 - LMT 1914
-3 B -03/-02 1988 S 12
-3 - -03
Z America/Santarem -3:38:48 - LMT 1914
-4 B -04/-03 1988 S 12
-4 - -04 2008 Jun 24
-3 - -03
Z America/Fortaleza -2:34 - LMT 1914
-3 B -03/-02 1990 S 17
-3 - -03 1999 S 30
-3 B -03/-02 2000 O 22
-3 - -03 2001 S 13
-3 B -03/-02 2002 O
-3 - -03
Z America/Recife -2:19:36 - LMT 1914
-3 B -03/-02 1990 S 17
-3 - -03 1999 S 30
-3 B -03/-02 2000 O 15
-3 - -03 2001 S 13
-3 B -03/-02 2002 O
-3 - -03
Z America/Araguaina -3:12:48 - LMT 1914
-3 B -03/-02 1990 S 17
-3 - -03 1995 S 14
-3 B -03/-02 2003 S 24
-3 - -03 2012 O 21
-3 B -03/-02 2013 S
-3 - -03
Z America/Maceio -2:22:52 - LMT 1914
-3 B -03/-02 1990 S 17
-3 - -03 1995 O 13
-3 B -03/-02 1996 S 4
-3 - -03 1999 S 30
-3 B -03/-02 2000 O 22
-3 - -03 2001 S 13
-3 B -03/-02 2002 O
-3 - -03
Z America/Bahia -2:34:4 - LMT 1914
-3 B -03/-02 2003 S 24
-3 - -03 2011 O 16
-3 B -03/-02 2012 O 21
-3 - -03
Z America/Sao_Paulo -3:6:28 - LMT 1914
-3 B -03/-02 1963 O 23
-3 1 -02 1964
-3 B -03/-02
Z America/Campo_Grande -3:38:28 - LMT 1914
-4 B -04/-03
Z America/Cuiaba -3:44:20 - LMT 1914
-4 B -04/-03 2003 S 24
-4 - -04 2004 O
-4 B -04/-03
Z America/Porto_Velho -4:15:36 - LMT 1914
-4 B -04/-03 1988 S 12
-4 - -04
Z America/Boa_Vista -4:2:40 - LMT 1914
-4 B -04/-03 1988 S 12
-4 - -04 1999 S 30
-4 B -04/-03 2000 O 15
-4 - -04
Z America/Manaus -4:0:4 - LMT 1914
-4 B -04/-03 1988 S 12
-4 - -04 1993 S 28
-4 B -04/-03 1994 S 22
-4 - -04
Z America/Eirunepe -4:39:28 - LMT 1914
-5 B -05/-04 1988 S 12
-5 - -05 1993 S 28
-5 B -05/-04 1994 S 22
-5 - -05 2008 Jun 24
-4 - -04 2013 N 10
-5 - -05
Z America/Rio_Branco -4:31:12 - LMT 1914
-5 B -05/-04 1988 S 12
-5 - -05 2008 Jun 24
-4 - -04 2013 N 10
-5 - -05
R x 1927 1931 - S 1 0 1 -
R x 1928 1932 - Ap 1 0 0 -
R x 1968 o - N 3 4u 1 -
R x 1969 o - Mar 30 3u 0 -
R x 1969 o - N 23 4u 1 -
R x 1970 o - Mar 29 3u 0 -
R x 1971 o - Mar 14 3u 0 -
R x 1970 1972 - O Su>=9 4u 1 -
R x 1972 1986 - Mar Su>=9 3u 0 -
R x 1973 o - S 30 4u 1 -
R x 1974 1987 - O Su>=9 4u 1 -
R x 1987 o - Ap 12 3u 0 -
R x 1988 1990 - Mar Su>=9 3u 0 -
R x 1988 1989 - O Su>=9 4u 1 -
R x 1990 o - S 16 4u 1 -
R x 1991 1996 - Mar Su>=9 3u 0 -
R x 1991 1997 - O Su>=9 4u 1 -
R x 1997 o - Mar 30 3u 0 -
R x 1998 o - Mar Su>=9 3u 0 -
R x 1998 o - S 27 4u 1 -
R x 1999 o - Ap 4 3u 0 -
R x 1999 2010 - O Su>=9 4u 1 -
R x 2000 2007 - Mar Su>=9 3u 0 -
R x 2008 o - Mar 30 3u 0 -
R x 2009 o - Mar Su>=9 3u 0 -
R x 2010 o - Ap Su>=1 3u 0 -
R x 2011 o - May Su>=2 3u 0 -
R x 2011 o - Au Su>=16 4u 1 -
R x 2012 2014 - Ap Su>=23 3u 0 -
R x 2012 2014 - S Su>=2 4u 1 -
R x 2016 2018 - May Su>=9 3u 0 -
R x 2016 2018 - Au Su>=9 4u 1 -
R x 2019 ma - Ap Su>=2 3u 0 -
R x 2019 ma - S Su>=2 4u 1 -
Z America/Santiago -4:42:46 - LMT 1890
-4:42:46 - SMT 1910 Ja 10
-5 - -05 1916 Jul
-4:42:46 - SMT 1918 S 10
-4 - -04 1919 Jul
-4:42:46 - SMT 1927 S
-5 x -05/-04 1932 S
-4 - -04 1942 Jun
-5 - -05 1942 Au
-4 - -04 1946 Jul 15
-4 1 -03 1946 S
-4 - -04 1947 Ap
-5 - -05 1947 May 21 23
-4 x -04/-03
Z America/Punta_Arenas -4:43:40 - LMT 1890
-4:42:46 - SMT 1910 Ja 10
-5 - -05 1916 Jul
-4:42:46 - SMT 1918 S 10
-4 - -04 1919 Jul
-4:42:46 - SMT 1927 S
-5 x -05/-04 1932 S
-4 - -04 1942 Jun
-5 - -05 1942 Au
-4 - -04 1947 Ap
-5 - -05 1947 May 21 23
-4 x -04/-03 2016 D 4
-3 - -03
Z Pacific/Easter -7:17:28 - LMT 1890
-7:17:28 - EMT 1932 S
-7 x -07/-06 1982 Mar 14 3u
-6 x -06/-05
Z Antarctica/Palmer 0 - -00 1965
-4 A -04/-03 1969 O 5
-3 A -03/-02 1982 May
-4 x -04/-03 2016 D 4
-3 - -03
R CO 1992 o - May 3 0 1 -
R CO 1993 o - Ap 4 0 0 -
Z America/Bogota -4:56:16 - LMT 1884 Mar 13
-4:56:16 - BMT 1914 N 23
-5 CO -05/-04
Z America/Curacao -4:35:47 - LMT 1912 F 12
-4:30 - -0430 1965
-4 - AST
L America/Curacao America/Lower_Princes
L America/Curacao America/Kralendijk
R EC 1992 o - N 28 0 1 -
R EC 1993 o - F 5 0 0 -
Z America/Guayaquil -5:19:20 - LMT 1890
-5:14 - QMT 1931
-5 EC -05/-04
Z Pacific/Galapagos -5:58:24 - LMT 1931
-5 - -05 1986
-6 EC -06/-05
R FK 1937 1938 - S lastSu 0 1 -
R FK 1938 1942 - Mar Su>=19 0 0 -
R FK 1939 o - O 1 0 1 -
R FK 1940 1942 - S lastSu 0 1 -
R FK 1943 o - Ja 1 0 0 -
R FK 1983 o - S lastSu 0 1 -
R FK 1984 1985 - Ap lastSu 0 0 -
R FK 1984 o - S 16 0 1 -
R FK 1985 2000 - S Su>=9 0 1 -
R FK 1986 2000 - Ap Su>=16 0 0 -
R FK 2001 2010 - Ap Su>=15 2 0 -
R FK 2001 2010 - S Su>=1 2 1 -
Z Atlantic/Stanley -3:51:24 - LMT 1890
-3:51:24 - SMT 1912 Mar 12
-4 FK -04/-03 1983 May
-3 FK -03/-02 1985 S 15
-4 FK -04/-03 2010 S 5 2
-3 - -03
Z America/Cayenne -3:29:20 - LMT 1911 Jul
-4 - -04 1967 O
-3 - -03
Z America/Guyana -3:52:40 - LMT 1915 Mar
-3:45 - -0345 1975 Jul 31
-3 - -03 1991
-4 - -04
R y 1975 1988 - O 1 0 1 -
R y 1975 1978 - Mar 1 0 0 -
R y 1979 1991 - Ap 1 0 0 -
R y 1989 o - O 22 0 1 -
R y 1990 o - O 1 0 1 -
R y 1991 o - O 6 0 1 -
R y 1992 o - Mar 1 0 0 -
R y 1992 o - O 5 0 1 -
R y 1993 o - Mar 31 0 0 -
R y 1993 1995 - O 1 0 1 -
R y 1994 1995 - F lastSu 0 0 -
R y 1996 o - Mar 1 0 0 -
R y 1996 2001 - O Su>=1 0 1 -
R y 1997 o - F lastSu 0 0 -
R y 1998 2001 - Mar Su>=1 0 0 -
R y 2002 2004 - Ap Su>=1 0 0 -
R y 2002 2003 - S Su>=1 0 1 -
R y 2004 2009 - O Su>=15 0 1 -
R y 2005 2009 - Mar Su>=8 0 0 -
R y 2010 ma - O Su>=1 0 1 -
R y 2010 2012 - Ap Su>=8 0 0 -
R y 2013 ma - Mar Su>=22 0 0 -
Z America/Asuncion -3:50:40 - LMT 1890
-3:50:40 - AMT 1931 O 10
-4 - -04 1972 O
-3 - -03 1974 Ap
-4 y -04/-03
R PE 1938 o - Ja 1 0 1 -
R PE 1938 o - Ap 1 0 0 -
R PE 1938 1939 - S lastSu 0 1 -
R PE 1939 1940 - Mar Su>=24 0 0 -
R PE 1986 1987 - Ja 1 0 1 -
R PE 1986 1987 - Ap 1 0 0 -
R PE 1990 o - Ja 1 0 1 -
R PE 1990 o - Ap 1 0 0 -
R PE 1994 o - Ja 1 0 1 -
R PE 1994 o - Ap 1 0 0 -
Z America/Lima -5:8:12 - LMT 1890
-5:8:36 - LMT 1908 Jul 28
-5 PE -05/-04
Z Atlantic/South_Georgia -2:26:8 - LMT 1890
-2 - -02
Z America/Paramaribo -3:40:40 - LMT 1911
-3:40:52 - PMT 1935
-3:40:36 - PMT 1945 O
-3:30 - -0330 1984 O
-3 - -03
Z America/Port_of_Spain -4:6:4 - LMT 1912 Mar 2
-4 - AST
L America/Port_of_Spain America/Anguilla
L America/Port_of_Spain America/Antigua
L America/Port_of_Spain America/Dominica
L America/Port_of_Spain America/Grenada
L America/Port_of_Spain America/Guadeloupe
L America/Port_of_Spain America/Marigot
L America/Port_of_Spain America/Montserrat
L America/Port_of_Spain America/St_Barthelemy
L America/Port_of_Spain America/St_Kitts
L America/Port_of_Spain America/St_Lucia
L America/Port_of_Spain America/St_Thomas
L America/Port_of_Spain America/St_Vincent
L America/Port_of_Spain America/Tortola
R U 1923 1925 - O 1 0 0:30 -
R U 1924 1926 - Ap 1 0 0 -
R U 1933 1938 - O lastSu 0 0:30 -
R U 1934 1941 - Mar lastSa 24 0 -
R U 1939 o - O 1 0 0:30 -
R U 1940 o - O 27 0 0:30 -
R U 1941 o - Au 1 0 0:30 -
R U 1942 o - D 14 0 0:30 -
R U 1943 o - Mar 14 0 0 -
R U 1959 o - May 24 0 0:30 -
R U 1959 o - N 15 0 0 -
R U 1960 o - Ja 17 0 1 -
R U 1960 o - Mar 6 0 0 -
R U 1965 o - Ap 4 0 1 -
R U 1965 o - S 26 0 0 -
R U 1968 o - May 27 0 0:30 -
R U 1968 o - D 1 0 0 -
R U 1970 o - Ap 25 0 1 -
R U 1970 o - Jun 14 0 0 -
R U 1972 o - Ap 23 0 1 -
R U 1972 o - Jul 16 0 0 -
R U 1974 o - Ja 13 0 1:30 -
R U 1974 o - Mar 10 0 0:30 -
R U 1974 o - S 1 0 0 -
R U 1974 o - D 22 0 1 -
R U 1975 o - Mar 30 0 0 -
R U 1976 o - D 19 0 1 -
R U 1977 o - Mar 6 0 0 -
R U 1977 o - D 4 0 1 -
R U 1978 1979 - Mar Su>=1 0 0 -
R U 1978 o - D 17 0 1 -
R U 1979 o - Ap 29 0 1 -
R U 1980 o - Mar 16 0 0 -
R U 1987 o - D 14 0 1 -
R U 1988 o - F 28 0 0 -
R U 1988 o - D 11 0 1 -
R U 1989 o - Mar 5 0 0 -
R U 1989 o - O 29 0 1 -
R U 1990 o - F 25 0 0 -
R U 1990 1991 - O Su>=21 0 1 -
R U 1991 1992 - Mar Su>=1 0 0 -
R U 1992 o - O 18 0 1 -
R U 1993 o - F 28 0 0 -
R U 2004 o - S 19 0 1 -
R U 2005 o - Mar 27 2 0 -
R U 2005 o - O 9 2 1 -
R U 2006 2015 - Mar Su>=8 2 0 -
R U 2006 2014 - O Su>=1 2 1 -
Z America/Montevideo -3:44:51 - LMT 1908 Jun 10
-3:44:51 - MMT 1920 May
-4 - -04 1923 O
-3:30 U -0330/-03 1942 D 14
-3 U -03/-0230 1960
-3 U -03/-02 1968
-3 U -03/-0230 1970
-3 U -03/-02 1974
-3 U -03/-0130 1974 Mar 10
-3 U -03/-0230 1974 D 22
-3 U -03/-02
Z America/Caracas -4:27:44 - LMT 1890
-4:27:40 - CMT 1912 F 12
-4:30 - -0430 1965
-4 - -04 2007 D 9 3
-4:30 - -0430 2016 May 1 2:30
-4 - -04
Z Etc/GMT 0 - GMT
Z Etc/UTC 0 - UTC
L Etc/GMT GMT
L Etc/UTC Etc/Universal
L Etc/UTC Etc/Zulu
L Etc/GMT Etc/Greenwich
L Etc/GMT Etc/GMT-0
L Etc/GMT Etc/GMT+0
L Etc/GMT Etc/GMT0
Z Etc/GMT-14 14 - +14
Z Etc/GMT-13 13 - +13
Z Etc/GMT-12 12 - +12
Z Etc/GMT-11 11 - +11
Z Etc/GMT-10 10 - +10
Z Etc/GMT-9 9 - +09
Z Etc/GMT-8 8 - +08
Z Etc/GMT-7 7 - +07
Z Etc/GMT-6 6 - +06
Z Etc/GMT-5 5 - +05
Z Etc/GMT-4 4 - +04
Z Etc/GMT-3 3 - +03
Z Etc/GMT-2 2 - +02
Z Etc/GMT-1 1 - +01
Z Etc/GMT+1 -1 - -01
Z Etc/GMT+2 -2 - -02
Z Etc/GMT+3 -3 - -03
Z Etc/GMT+4 -4 - -04
Z Etc/GMT+5 -5 - -05
Z Etc/GMT+6 -6 - -06
Z Etc/GMT+7 -7 - -07
Z Etc/GMT+8 -8 - -08
Z Etc/GMT+9 -9 - -09
Z Etc/GMT+10 -10 - -10
Z Etc/GMT+11 -11 - -11
Z Etc/GMT+12 -12 - -12
Z Factory 0 - -00
L Africa/Nairobi Africa/Asmera
L Africa/Abidjan Africa/Timbuktu
L America/Argentina/Catamarca America/Argentina/ComodRivadavia
L America/Adak America/Atka
L America/Argentina/Buenos_Aires America/Buenos_Aires
L America/Argentina/Catamarca America/Catamarca
L America/Atikokan America/Coral_Harbour
L America/Argentina/Cordoba America/Cordoba
L America/Tijuana America/Ensenada
L America/Indiana/Indianapolis America/Fort_Wayne
L America/Nuuk America/Godthab
L America/Indiana/Indianapolis America/Indianapolis
L America/Argentina/Jujuy America/Jujuy
L America/Indiana/Knox America/Knox_IN
L America/Kentucky/Louisville America/Louisville
L America/Argentina/Mendoza America/Mendoza
L America/Toronto America/Montreal
L America/Rio_Branco America/Porto_Acre
L America/Argentina/Cordoba America/Rosario
L America/Tijuana America/Santa_Isabel
L America/Denver America/Shiprock
L America/Port_of_Spain America/Virgin
L Pacific/Auckland Antarctica/South_Pole
L Asia/Ashgabat Asia/Ashkhabad
L Asia/Kolkata Asia/Calcutta
L Asia/Shanghai Asia/Chongqing
L Asia/Shanghai Asia/Chungking
L Asia/Dhaka Asia/Dacca
L Asia/Shanghai Asia/Harbin
L Asia/Urumqi Asia/Kashgar
L Asia/Kathmandu Asia/Katmandu
L Asia/Macau Asia/Macao
L Asia/Yangon Asia/Rangoon
L Asia/Ho_Chi_Minh Asia/Saigon
L Asia/Jerusalem Asia/Tel_Aviv
L Asia/Thimphu Asia/Thimbu
L Asia/Makassar Asia/Ujung_Pandang
L Asia/Ulaanbaatar Asia/Ulan_Bator
L Atlantic/Faroe Atlantic/Faeroe
L Europe/Oslo Atlantic/Jan_Mayen
L Australia/Sydney Australia/ACT
L Australia/Sydney Australia/Canberra
L Australia/Lord_Howe Australia/LHI
L Australia/Sydney Australia/NSW
L Australia/Darwin Australia/North
L Australia/Brisbane Australia/Queensland
L Australia/Adelaide Australia/South
L Australia/Hobart Australia/Tasmania
L Australia/Melbourne Australia/Victoria
L Australia/Perth Australia/West
L Australia/Broken_Hill Australia/Yancowinna
L America/Rio_Branco Brazil/Acre
L America/Noronha Brazil/DeNoronha
L America/Sao_Paulo Brazil/East
L America/Manaus Brazil/West
L America/Halifax Canada/Atlantic
L America/Winnipeg Canada/Central
L America/Toronto Canada/Eastern
L America/Edmonton Canada/Mountain
L America/St_Johns Canada/Newfoundland
L America/Vancouver Canada/Pacific
L America/Regina Canada/Saskatchewan
L America/Whitehorse Canada/Yukon
L America/Santiago Chile/Continental
L Pacific/Easter Chile/EasterIsland
L America/Havana Cuba
L Africa/Cairo Egypt
L Europe/Dublin Eire
L Etc/UTC Etc/UCT
L Europe/London Europe/Belfast
L Europe/Chisinau Europe/Tiraspol
L Europe/London GB
L Europe/London GB-Eire
L Etc/GMT GMT+0
L Etc/GMT GMT-0
L Etc/GMT GMT0
L Etc/GMT Greenwich
L Asia/Hong_Kong Hongkong
L Atlantic/Reykjavik Iceland
L Asia/Tehran Iran
L Asia/Jerusalem Israel
L America/Jamaica Jamaica
L Asia/Tokyo Japan
L Pacific/Kwajalein Kwajalein
L Africa/Tripoli Libya
L America/Tijuana Mexico/BajaNorte
L America/Mazatlan Mexico/BajaSur
L America/Mexico_City Mexico/General
L Pacific/Auckland NZ
L Pacific/Chatham NZ-CHAT
L America/Denver Navajo
L Asia/Shanghai PRC
L Pacific/Honolulu Pacific/Johnston
L Pacific/Pohnpei Pacific/Ponape
L Pacific/Pago_Pago Pacific/Samoa
L Pacific/Chuuk Pacific/Truk
L Pacific/Chuuk Pacific/Yap
L Europe/Warsaw Poland
L Europe/Lisbon Portugal
L Asia/Taipei ROC
L Asia/Seoul ROK
L Asia/Singapore Singapore
L Europe/Istanbul Turkey
L Etc/UTC UCT
L America/Anchorage US/Alaska
L America/Adak US/Aleutian
L America/Phoenix US/Arizona
L America/Chicago US/Central
L America/Indiana/Indianapolis US/East-Indiana
L America/New_York US/Eastern
L Pacific/Honolulu US/Hawaii
L America/Indiana/Knox US/Indiana-Starke
L America/Detroit US/Michigan
L America/Denver US/Mountain
L America/Los_Angeles US/Pacific
L Pacific/Pago_Pago US/Samoa
L Etc/UTC UTC
L Etc/UTC Universal
L Europe/Moscow W-SU
L Etc/UTC Zulu
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Factory
|
TZif2 -00 TZif2 -00
<-00>0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/UTC
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/Universal
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/Greenwich
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-6
|
TZif2 T` +06 TZif2 T` +06
<+06>-6
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-1
|
TZif2 +01 TZif2 +01
<+01>-1
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-0
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-7
|
TZif2 bp +07 TZif2 bp +07
<+07>-7
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/Zulu
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT0
|
TZif2 GMT TZif2 GMT
GMT0
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/UCT
|
TZif2 UTC TZif2 UTC
UTC0
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.