Columns:
  repo_id           - string, 208 distinct values
  file_path         - string, length 31 to 190
  content           - string, length 1 to 2.65M
  __index_level_0__ - int64, range 0 to 0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.35.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/entry_points.txt
[console_scripts]
pip = pip._internal.cli.main:main
pip3 = pip._internal.cli.main:main
pip3.8 = pip._internal.cli.main:main
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/top_level.txt
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/LICENSE.txt
Copyright (c) 2008-2019 The pip developers (see AUTHORS.txt file)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip-20.2.3.dist-info/METADATA
Metadata-Version: 2.1
Name: pip
Version: 20.2.3
Summary: The PyPA recommended tool for installing Python packages.
Home-page: https://pip.pypa.io/
Author: The pip developers
Author-email: distutils-sig@python.org
License: MIT
Project-URL: Documentation, https://pip.pypa.io
Project-URL: Source, https://github.com/pypa/pip
Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
Keywords: distutils easy_install egg setuptools wheel virtualenv
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Software Development :: Build Tools
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*

pip - The Python Package Installer
==================================

.. image:: https://img.shields.io/pypi/v/pip.svg
   :target: https://pypi.org/project/pip/

.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
   :target: https://pip.pypa.io/en/latest

pip is the `package installer`_ for Python. You can use pip to install
packages from the `Python Package Index`_ and other indexes.

Please take a look at our documentation for how to install and use pip:

* `Installation`_
* `Usage`_

We release updates regularly, with a new version every 3 months. Find more
details in our documentation:

* `Release notes`_
* `Release process`_

In 2020, we're working on improvements to the heart of pip. Please
`learn more and take our survey`_ to help us do it right.

If you find bugs, need help, or want to talk to the developers, please use
our mailing lists or chat rooms:

* `Issue tracking`_
* `Discourse channel`_
* `User IRC`_

If you want to get involved head over to GitHub to get the source code, look
at our development documentation and feel free to jump on the developer
mailing lists and chat rooms:

* `GitHub page`_
* `Development documentation`_
* `Development mailing list`_
* `Development IRC`_

Code of Conduct
---------------

Everyone interacting in the pip project's codebases, issue trackers, chat
rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.

.. _package installer: https://packaging.python.org/guides/tool-recommendations/
.. _Python Package Index: https://pypi.org
.. _Installation: https://pip.pypa.io/en/stable/installing.html
.. _Usage: https://pip.pypa.io/en/stable/
.. _Release notes: https://pip.pypa.io/en/stable/news.html
.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
.. _GitHub page: https://github.com/pypa/pip
.. _Development documentation: https://pip.pypa.io/en/latest/development
.. _learn more and take our survey: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
.. _Issue tracking: https://github.com/pypa/pip/issues
.. _Discourse channel: https://discuss.python.org/c/packaging
.. _Development mailing list: https://mail.python.org/mailman3/lists/distutils-sig.python.org/
.. _User IRC: https://webchat.freenode.net/?channels=%23pypa
.. _Development IRC: https://webchat.freenode.net/?channels=%23pypa-dev
.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/RECORD
toml-0.10.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
toml-0.10.2.dist-info/LICENSE,sha256=LZKUgj32yJNXyL5JJ_znk2HWVh5e51MtWSbmOTmqpTY,1252
toml-0.10.2.dist-info/METADATA,sha256=n_YkspvEihd_QXLIZZ50WVSFz3rZ_k7jQP-OU1WUpWY,7142
toml-0.10.2.dist-info/RECORD,,
toml-0.10.2.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
toml-0.10.2.dist-info/top_level.txt,sha256=2BO8ZRNnvJWgXyiQv66LBb_v87qBzcoUtEBefA75Ouk,5
toml/__init__.py,sha256=Au3kqCwKD0cjbf4yJGOpUFwpsY0WHsC1ZRGvWgIKmpc,723
toml/__pycache__/__init__.cpython-39.pyc,,
toml/__pycache__/decoder.cpython-39.pyc,,
toml/__pycache__/encoder.cpython-39.pyc,,
toml/__pycache__/ordered.cpython-39.pyc,,
toml/__pycache__/tz.cpython-39.pyc,,
toml/decoder.py,sha256=hSGTLf-2WBDZ_ddoCHWFy6N647XyMSh1o3rN2o4dEFg,38942
toml/encoder.py,sha256=XjBc8ayvvlsLyd_qDA4tMWDNmMFRS4DpwtuDSWBq7zo,9940
toml/ordered.py,sha256=mz03lZmV0bmc9lsYRIUOuj7Dsu5Ptwq-UtGVq5FdVZ4,354
toml/tz.py,sha256=-5vg8wkg_atnVi2TnEveexIVE7T_FxBVr_-2WVfO1oA,701
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/LICENSE
The MIT License

Copyright 2013-2019 William Pearson
Copyright 2015-2016 Julien Enselme
Copyright 2016 Google Inc.
Copyright 2017 Samuel Vasko
Copyright 2017 Nate Prewitt
Copyright 2017 Jack Evans
Copyright 2019 Filippo Broggini

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.35.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/top_level.txt
toml
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/toml-0.10.2.dist-info/METADATA
Metadata-Version: 2.1
Name: toml
Version: 0.10.2
Summary: Python Library for Tom's Obvious, Minimal Language
Home-page: https://github.com/uiri/toml
Author: William Pearson
Author-email: uiri@xqz.ca
License: MIT
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*

****
TOML
****

.. image:: https://img.shields.io/pypi/v/toml
    :target: https://pypi.org/project/toml/

.. image:: https://travis-ci.org/uiri/toml.svg?branch=master
    :target: https://travis-ci.org/uiri/toml

.. image:: https://img.shields.io/pypi/pyversions/toml.svg
    :target: https://pypi.org/project/toml/

A Python library for parsing and creating `TOML <https://en.wikipedia.org/wiki/TOML>`_.

The module passes `the TOML test suite <https://github.com/BurntSushi/toml-test>`_.

See also:

* `The TOML Standard <https://github.com/toml-lang/toml>`_
* `The currently supported TOML specification <https://github.com/toml-lang/toml/blob/v0.5.0/README.md>`_

Installation
============

To install the latest release on `PyPI <https://pypi.org/project/toml/>`_,
simply run:

::

  pip install toml

Or to install the latest development version, run:

::

  git clone https://github.com/uiri/toml.git
  cd toml
  python setup.py install

Quick Tutorial
==============

*toml.loads* takes in a string containing standard TOML-formatted data and
returns a dictionary containing the parsed data.

.. code:: pycon

  >>> import toml
  >>> toml_string = """
  ... # This is a TOML document.
  ...
  ... title = "TOML Example"
  ...
  ... [owner]
  ... name = "Tom Preston-Werner"
  ... dob = 1979-05-27T07:32:00-08:00 # First class dates
  ...
  ... [database]
  ... server = "192.168.1.1"
  ... ports = [ 8001, 8001, 8002 ]
  ... connection_max = 5000
  ... enabled = true
  ...
  ... [servers]
  ...
  ... # Indentation (tabs and/or spaces) is allowed but not required
  ... [servers.alpha]
  ... ip = "10.0.0.1"
  ... dc = "eqdc10"
  ...
  ... [servers.beta]
  ... ip = "10.0.0.2"
  ... dc = "eqdc10"
  ...
  ... [clients]
  ... data = [ ["gamma", "delta"], [1, 2] ]
  ...
  ... # Line breaks are OK when inside arrays
  ... hosts = [
  ...   "alpha",
  ...   "omega"
  ... ]
  ... """
  >>> parsed_toml = toml.loads(toml_string)

*toml.dumps* takes a dictionary and returns a string containing the
corresponding TOML-formatted data.

.. code:: pycon

  >>> new_toml_string = toml.dumps(parsed_toml)
  >>> print(new_toml_string)
  title = "TOML Example"
  [owner]
  name = "Tom Preston-Werner"
  dob = 1979-05-27T07:32:00Z
  [database]
  server = "192.168.1.1"
  ports = [ 8001, 8001, 8002,]
  connection_max = 5000
  enabled = true
  [clients]
  data = [ [ "gamma", "delta",], [ 1, 2,],]
  hosts = [ "alpha", "omega",]
  [servers.alpha]
  ip = "10.0.0.1"
  dc = "eqdc10"
  [servers.beta]
  ip = "10.0.0.2"
  dc = "eqdc10"

*toml.dump* takes a dictionary and a file descriptor and returns a string
containing the corresponding TOML-formatted data.

.. code:: pycon

  >>> with open('new_toml_file.toml', 'w') as f:
  ...     new_toml_string = toml.dump(parsed_toml, f)
  >>> print(new_toml_string)
  title = "TOML Example"
  [owner]
  name = "Tom Preston-Werner"
  dob = 1979-05-27T07:32:00Z
  [database]
  server = "192.168.1.1"
  ports = [ 8001, 8001, 8002,]
  connection_max = 5000
  enabled = true
  [clients]
  data = [ [ "gamma", "delta",], [ 1, 2,],]
  hosts = [ "alpha", "omega",]
  [servers.alpha]
  ip = "10.0.0.1"
  dc = "eqdc10"
  [servers.beta]
  ip = "10.0.0.2"
  dc = "eqdc10"

For more functions, view the API Reference below.

Note
----

For Numpy users, by default the data types ``np.floatX`` will not be
translated to floats by toml, but will instead be encoded as strings. To get
around this, specify the ``TomlNumpyEncoder`` when saving your data.

.. code:: pycon

  >>> import toml
  >>> import numpy as np
  >>> a = np.arange(0, 10, dtype=np.double)
  >>> output = {'a': a}
  >>> toml.dumps(output)
  'a = [ "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0",]\n'
  >>> toml.dumps(output, encoder=toml.TomlNumpyEncoder())
  'a = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,]\n'

API Reference
=============

``toml.load(f, _dict=dict)``

  Parse a file or a list of files as TOML and return a dictionary.

  :Args:
    * ``f``: A path to a file, list of filepaths (to be read into single
      object) or a file descriptor
    * ``_dict``: The class of the dictionary object to be returned

  :Returns: A dictionary (or object ``_dict``) containing parsed TOML data

  :Raises:
    * ``TypeError``: When ``f`` is an invalid type or is a list containing
      invalid types
    * ``TomlDecodeError``: When an error occurs while decoding the file(s)

``toml.loads(s, _dict=dict)``

  Parse a TOML-formatted string to a dictionary.

  :Args:
    * ``s``: The TOML-formatted string to be parsed
    * ``_dict``: Specifies the class of the returned toml dictionary

  :Returns: A dictionary (or object ``_dict``) containing parsed TOML data

  :Raises:
    * ``TypeError``: When a non-string object is passed
    * ``TomlDecodeError``: When an error occurs while decoding the
      TOML-formatted string

``toml.dump(o, f, encoder=None)``

  Write a dictionary to a file containing TOML-formatted data

  :Args:
    * ``o``: An object to be converted into TOML
    * ``f``: A File descriptor where the TOML-formatted output should be stored
    * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding
      the object. If ``None``, will default to ``TomlEncoder``

  :Returns: A string containing the TOML-formatted data corresponding to
    object ``o``

  :Raises:
    * ``TypeError``: When anything other than file descriptor is passed

``toml.dumps(o, encoder=None)``

  Create a TOML-formatted string from an input object

  :Args:
    * ``o``: An object to be converted into TOML
    * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding
      the object. If ``None``, will default to ``TomlEncoder``

  :Returns: A string containing the TOML-formatted data corresponding to
    object ``o``

Licensing
=========

This project is released under the terms of the MIT Open Source License. View
*LICENSE.txt* for more information.
0
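The Quick Tutorial above boils down to a short, runnable round trip. A minimal sketch assuming only the ``toml`` 0.10.x API documented in the METADATA above (the document string is illustrative):

.. code:: python

    import toml

    # Illustrative document; any of the TOML from the tutorial above works.
    doc = '''
    title = "TOML Example"

    [owner]
    name = "Tom Preston-Werner"
    '''

    parsed = toml.loads(doc)  # str -> dict
    assert parsed["owner"]["name"] == "Tom Preston-Werner"

    with open("new_toml_file.toml", "w") as f:
        toml.dump(parsed, f)  # dict -> file (also returns the string)

    print(toml.dumps(parsed))  # dict -> TOML string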
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/RECORD
Jinja2-2.11.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Jinja2-2.11.2.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
Jinja2-2.11.2.dist-info/METADATA,sha256=5ZHRZoIRAMHsJPnqhlJ622_dRPsYePYJ-9EH4-Ry7yI,3535
Jinja2-2.11.2.dist-info/RECORD,,
Jinja2-2.11.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
Jinja2-2.11.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
Jinja2-2.11.2.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
Jinja2-2.11.2.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
jinja2/__init__.py,sha256=0QCM_jKKDM10yzSdHRVV4mQbCbDqf0GN0GirAqibn9Y,1549
jinja2/__pycache__/__init__.cpython-39.pyc,,
jinja2/__pycache__/_compat.cpython-39.pyc,,
jinja2/__pycache__/_identifier.cpython-39.pyc,,
jinja2/__pycache__/asyncfilters.cpython-39.pyc,,
jinja2/__pycache__/asyncsupport.cpython-39.pyc,,
jinja2/__pycache__/bccache.cpython-39.pyc,,
jinja2/__pycache__/compiler.cpython-39.pyc,,
jinja2/__pycache__/constants.cpython-39.pyc,,
jinja2/__pycache__/debug.cpython-39.pyc,,
jinja2/__pycache__/defaults.cpython-39.pyc,,
jinja2/__pycache__/environment.cpython-39.pyc,,
jinja2/__pycache__/exceptions.cpython-39.pyc,,
jinja2/__pycache__/ext.cpython-39.pyc,,
jinja2/__pycache__/filters.cpython-39.pyc,,
jinja2/__pycache__/idtracking.cpython-39.pyc,,
jinja2/__pycache__/lexer.cpython-39.pyc,,
jinja2/__pycache__/loaders.cpython-39.pyc,,
jinja2/__pycache__/meta.cpython-39.pyc,,
jinja2/__pycache__/nativetypes.cpython-39.pyc,,
jinja2/__pycache__/nodes.cpython-39.pyc,,
jinja2/__pycache__/optimizer.cpython-39.pyc,,
jinja2/__pycache__/parser.cpython-39.pyc,,
jinja2/__pycache__/runtime.cpython-39.pyc,,
jinja2/__pycache__/sandbox.cpython-39.pyc,,
jinja2/__pycache__/tests.cpython-39.pyc,,
jinja2/__pycache__/utils.cpython-39.pyc,,
jinja2/__pycache__/visitor.cpython-39.pyc,,
jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191
jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775
jinja2/asyncfilters.py,sha256=XJtYXTxFvcJ5xwk6SaDL4S0oNnT0wPYvXBCSzc482fI,4250
jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209
jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139
jinja2/compiler.py,sha256=Ta9W1Lit542wItAHXlDcg0sEOsFDMirCdlFPHAurg4o,66284
jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
jinja2/debug.py,sha256=neR7GIGGjZH3_ILJGVUYy3eLQCCaWJMXOb7o0kGInWc,8529
jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126
jinja2/environment.py,sha256=XDSLKc4SqNLMOwTSq3TbWEyA5WyXfuLuVD0wAVjEFwM,50629
jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441
jinja2/filters.py,sha256=_RpPgAlgIj7ExvyDzcHAC3B36cocfWK-1TEketbNeM0,41415
jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211
jinja2/lexer.py,sha256=nUFLRKhhKmmEWkLI65nQePgcQs7qsRdjVYZETMt_v0g,30331
jinja2/loaders.py,sha256=C-fST_dmFjgWkp0ZuCkrgICAoOsoSIF28wfAFink0oU,17666
jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131
jinja2/nativetypes.py,sha256=Ul__gtVw4xH-0qvUvnCNHedQeNDwmEuyLJztzzSPeRg,2753
jinja2/nodes.py,sha256=Mk1oJPVgIjnQw9WOqILvcu3rLepcFZ0ahxQm2mbwDwc,31095
jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457
jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660
jinja2/runtime.py,sha256=0y-BRyIEZ9ltByL2Id6GpHe1oDRQAwNeQvI0SKobNMw,30618
jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127
jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799
jinja2/utils.py,sha256=OoVMlQe9S2-lWT6jJbTu9tDuDvGNyWUhHDcE51i5_Do,22522
jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/entry_points.txt
[babel.extractors]
jinja2 = jinja2.ext:babel_extract [i18n]
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/LICENSE.rst
Copyright 2007 Pallets

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1.  Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

2.  Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

3.  Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/top_level.txt
jinja2
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/Jinja2-2.11.2.dist-info/METADATA
Metadata-Version: 2.1
Name: Jinja2
Version: 2.11.2
Summary: A very fast and expressive template engine.
Home-page: https://palletsprojects.com/p/jinja/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: Pallets
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://jinja.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/jinja
Project-URL: Issue tracker, https://github.com/pallets/jinja/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
Description-Content-Type: text/x-rst
Requires-Dist: MarkupSafe (>=0.23)
Provides-Extra: i18n
Requires-Dist: Babel (>=0.8) ; extra == 'i18n'

Jinja
=====

Jinja is a fast, expressive, extensible templating engine. Special
placeholders in the template allow writing code similar to Python
syntax. Then the template is passed data to render the final document.

It includes:

-   Template inheritance and inclusion.
-   Define and import macros within templates.
-   HTML templates can use autoescaping to prevent XSS from untrusted
    user input.
-   A sandboxed environment can safely render untrusted templates.
-   AsyncIO support for generating templates and calling async
    functions.
-   I18N support with Babel.
-   Templates are compiled to optimized Python code just-in-time and
    cached, or can be compiled ahead-of-time.
-   Exceptions point to the correct line in templates to make debugging
    easier.
-   Extensible filters, tests, functions, and even syntax.

Jinja's philosophy is that while application logic belongs in Python if
possible, it shouldn't make the template designer's job difficult by
restricting functionality too much.

Installing
----------

Install and update using `pip`_:

.. code-block:: text

    $ pip install -U Jinja2

.. _pip: https://pip.pypa.io/en/stable/quickstart/

In A Nutshell
-------------

.. code-block:: jinja

    {% extends "base.html" %}
    {% block title %}Members{% endblock %}
    {% block content %}
      <ul>
      {% for user in users %}
        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
      {% endfor %}
      </ul>
    {% endblock %}

Links
-----

-   Website: https://palletsprojects.com/p/jinja/
-   Documentation: https://jinja.palletsprojects.com/
-   Releases: https://pypi.org/project/Jinja2/
-   Code: https://github.com/pallets/jinja
-   Issue tracker: https://github.com/pallets/jinja/issues
-   Test status: https://dev.azure.com/pallets/jinja/_build
-   Official chat: https://discord.gg/t6rrQZH
0
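The "In A Nutshell" block above assumes file-based templates; a minimal self-contained sketch of the same rendering flow, using an inline template and made-up data:

.. code:: python

    from jinja2 import Environment

    # Autoescaping, per the feature list in the README above.
    env = Environment(autoescape=True)
    template = env.from_string(
        "<ul>{% for user in users %}<li>{{ user.username }}</li>{% endfor %}</ul>"
    )

    # Attribute-style access on dicts falls back to item lookup in Jinja,
    # so plain dicts stand in for model objects here.
    print(template.render(users=[{"username": "alice"}, {"username": "bob"}]))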
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/RECORD
__pycache__/six.cpython-39.pyc,,
six-1.15.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
six-1.15.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066
six-1.15.0.dist-info/METADATA,sha256=W6rlyoeMZHXh6srP9NXNsm0rjAf_660re8WdH5TBT8E,1795
six-1.15.0.dist-info/RECORD,,
six-1.15.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
six-1.15.0.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
six-1.15.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
six.py,sha256=U4Z_yv534W5CNyjY9i8V1OXY2SjAny8y2L5vDLhhThM,34159
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/LICENSE
Copyright (c) 2010-2020 Benjamin Peterson

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/top_level.txt
six
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/six-1.15.0.dist-info/METADATA
Metadata-Version: 2.1
Name: six
Version: 1.15.0
Summary: Python 2 and 3 compatibility utilities
Home-page: https://github.com/benjaminp/six
Author: Benjamin Peterson
Author-email: benjamin@python.org
License: MIT
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*

.. image:: https://img.shields.io/pypi/v/six.svg
   :target: https://pypi.org/project/six/
   :alt: six on PyPI

.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master
   :target: https://travis-ci.org/benjaminp/six
   :alt: six on TravisCI

.. image:: https://readthedocs.org/projects/six/badge/?version=latest
   :target: https://six.readthedocs.io/
   :alt: six's documentation on Read the Docs

.. image:: https://img.shields.io/badge/license-MIT-green.svg
   :target: https://github.com/benjaminp/six/blob/master/LICENSE
   :alt: MIT License badge

Six is a Python 2 and 3 compatibility library. It provides utility functions
for smoothing over the differences between the Python versions with the goal
of writing Python code that is compatible on both Python versions. See the
documentation for more information on what is provided.

Six supports Python 2.7 and 3.3+. It is contained in only one Python file, so
it can be easily copied into your project. (The copyright and license notice
must be retained.)

Online documentation is at https://six.readthedocs.io/.

Bugs can be reported to https://github.com/benjaminp/six. The code can also
be found there.
0
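A minimal sketch of the compatibility helpers the six README describes (all names are from six's documented API):

.. code:: python

    import six

    # string_types is (basestring,) on Python 2 and (str,) on Python 3,
    # so one isinstance check covers both interpreters.
    assert isinstance("hello", six.string_types)

    # Version flags for the occasional explicit branch.
    if six.PY3:
        text_type = str
    else:
        text_type = unicode  # noqa: F821 -- Python 2 only

    # iteritems() picks the memory-efficient dict iterator on either version.
    for key, value in six.iteritems({"a": 1}):
        print(key, value)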
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_cors/version.py
__version__ = '3.0.10'
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_cors/decorator.py
# -*- coding: utf-8 -*- """ decorator ~~~~ This unit exposes a single decorator which should be used to wrap a Flask route with. It accepts all parameters and options as the CORS extension. :copyright: (c) 2016 by Cory Dolphin. :license: MIT, see LICENSE for more details. """ from functools import update_wrapper from flask import make_response, request, current_app from .core import * LOG = logging.getLogger(__name__) def cross_origin(*args, **kwargs): """ This function is the decorator which is used to wrap a Flask route with. In the simplest case, simply use the default parameters to allow all origins in what is the most permissive configuration. If this method modifies state or performs authentication which may be brute-forced, you should add some degree of protection, such as Cross Site Forgery Request protection. :param origins: The origin, or list of origins to allow requests from. The origin(s) may be regular expressions, case-sensitive strings, or else an asterisk Default : '*' :type origins: list, string or regex :param methods: The method or list of methods which the allowed origins are allowed to access for non-simple requests. Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE] :type methods: list or string :param expose_headers: The header or list which are safe to expose to the API of a CORS API specification. Default : None :type expose_headers: list or string :param allow_headers: The header or list of header field names which can be used when this resource is accessed by allowed origins. The header(s) may be regular expressions, case-sensitive strings, or else an asterisk. Default : '*', allow all headers :type allow_headers: list, string or regex :param supports_credentials: Allows users to make authenticated requests. If true, injects the `Access-Control-Allow-Credentials` header in responses. This allows cookies and credentials to be submitted across domains. :note: This option cannot be used in conjuction with a '*' origin Default : False :type supports_credentials: bool :param max_age: The maximum time for which this CORS request maybe cached. This value is set as the `Access-Control-Max-Age` header. Default : None :type max_age: timedelta, integer, string or None :param send_wildcard: If True, and the origins parameter is `*`, a wildcard `Access-Control-Allow-Origin` header is sent, rather than the request's `Origin` header. Default : False :type send_wildcard: bool :param vary_header: If True, the header Vary: Origin will be returned as per the W3 implementation guidelines. Setting this header when the `Access-Control-Allow-Origin` is dynamically generated (e.g. when there is more than one allowed origin, and an Origin than '*' is returned) informs CDNs and other caches that the CORS headers are dynamic, and cannot be cached. If False, the Vary header will never be injected or altered. Default : True :type vary_header: bool :param automatic_options: Only applies to the `cross_origin` decorator. If True, Flask-CORS will override Flask's default OPTIONS handling to return CORS headers for OPTIONS requests. Default : True :type automatic_options: bool """ _options = kwargs def decorator(f): LOG.debug("Enabling %s for cross_origin using options:%s", f, _options) # If True, intercept OPTIONS requests by modifying the view function, # replicating Flask's default behavior, and wrapping the response with # CORS headers. 
# # If f.provide_automatic_options is unset or True, Flask's route # decorator (which is actually wraps the function object we return) # intercepts OPTIONS handling, and requests will not have CORS headers if _options.get('automatic_options', True): f.required_methods = getattr(f, 'required_methods', set()) f.required_methods.add('OPTIONS') f.provide_automatic_options = False def wrapped_function(*args, **kwargs): # Handle setting of Flask-Cors parameters options = get_cors_options(current_app, _options) if options.get('automatic_options') and request.method == 'OPTIONS': resp = current_app.make_default_options_response() else: resp = make_response(f(*args, **kwargs)) set_cors_headers(resp, options) setattr(resp, FLASK_CORS_EVALUATED, True) return resp return update_wrapper(wrapped_function, f) return decorator
0
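A minimal sketch of the decorator in use, per the docstring above (the route path and allowed origin are made up):

.. code:: python

    from flask import Flask, jsonify
    from flask_cors import cross_origin

    app = Flask(__name__)

    @app.route("/api/ping")
    @cross_origin(origins=["https://example.com"])  # hypothetical origin
    def ping():
        # With automatic_options=True (the default), the decorator answers
        # the preflight OPTIONS request itself and adds the
        # Access-Control-* headers to this response.
        return jsonify(ok=True)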
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_cors/__init__.py
# -*- coding: utf-8 -*- """ flask_cors ~~~~ Flask-CORS is a simple extension to Flask allowing you to support cross origin resource sharing (CORS) using a simple decorator. :copyright: (c) 2016 by Cory Dolphin. :license: MIT, see LICENSE for more details. """ from .decorator import cross_origin from .extension import CORS from .version import __version__ __all__ = ['CORS', 'cross_origin'] # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler # Set initial level to WARN. Users must manually enable logging for # flask_cors to see our logging. rootlogger = logging.getLogger(__name__) rootlogger.addHandler(NullHandler()) if rootlogger.level == logging.NOTSET: rootlogger.setLevel(logging.WARN)
0
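As the comment above notes, flask_cors stays quiet at WARN until the caller opts in; a one-liner sketch to surface its debug output:

.. code:: python

    import logging

    # Opt in to the debug logs that decorator.py and core.py emit.
    logging.basicConfig()
    logging.getLogger("flask_cors").setLevel(logging.DEBUG)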
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_cors/core.py
# -*- coding: utf-8 -*- """ core ~~~~ Core functionality shared between the extension and the decorator. :copyright: (c) 2016 by Cory Dolphin. :license: MIT, see LICENSE for more details. """ import re import logging try: # on python 3 from collections.abc import Iterable except ImportError: # on python 2.7 and pypy from collections import Iterable from datetime import timedelta from six import string_types from flask import request, current_app from werkzeug.datastructures import Headers, MultiDict LOG = logging.getLogger(__name__) # Response Headers ACL_ORIGIN = 'Access-Control-Allow-Origin' ACL_METHODS = 'Access-Control-Allow-Methods' ACL_ALLOW_HEADERS = 'Access-Control-Allow-Headers' ACL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers' ACL_CREDENTIALS = 'Access-Control-Allow-Credentials' ACL_MAX_AGE = 'Access-Control-Max-Age' # Request Header ACL_REQUEST_METHOD = 'Access-Control-Request-Method' ACL_REQUEST_HEADERS = 'Access-Control-Request-Headers' ALL_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'] CONFIG_OPTIONS = ['CORS_ORIGINS', 'CORS_METHODS', 'CORS_ALLOW_HEADERS', 'CORS_EXPOSE_HEADERS', 'CORS_SUPPORTS_CREDENTIALS', 'CORS_MAX_AGE', 'CORS_SEND_WILDCARD', 'CORS_AUTOMATIC_OPTIONS', 'CORS_VARY_HEADER', 'CORS_RESOURCES', 'CORS_INTERCEPT_EXCEPTIONS', 'CORS_ALWAYS_SEND'] # Attribute added to request object by decorator to indicate that CORS # was evaluated, in case the decorator and extension are both applied # to a view. FLASK_CORS_EVALUATED = '_FLASK_CORS_EVALUATED' # Strange, but this gets the type of a compiled regex, which is otherwise not # exposed in a public API. RegexObject = type(re.compile('')) DEFAULT_OPTIONS = dict(origins='*', methods=ALL_METHODS, allow_headers='*', expose_headers=None, supports_credentials=False, max_age=None, send_wildcard=False, automatic_options=True, vary_header=True, resources=r'/*', intercept_exceptions=True, always_send=True) def parse_resources(resources): if isinstance(resources, dict): # To make the API more consistent with the decorator, allow a # resource of '*', which is not actually a valid regexp. resources = [(re_fix(k), v) for k, v in resources.items()] # Sort by regex length to provide consistency of matching and # to provide a proxy for specificity of match. E.G. longer # regular expressions are tried first. def pattern_length(pair): maybe_regex, _ = pair return len(get_regexp_pattern(maybe_regex)) return sorted(resources, key=pattern_length, reverse=True) elif isinstance(resources, string_types): return [(re_fix(resources), {})] elif isinstance(resources, Iterable): return [(re_fix(r), {}) for r in resources] # Type of compiled regex is not part of the public API. Test for this # at runtime. elif isinstance(resources, RegexObject): return [(re_fix(resources), {})] else: raise ValueError("Unexpected value for resources argument.") def get_regexp_pattern(regexp): """ Helper that returns regexp pattern from given value. :param regexp: regular expression to stringify :type regexp: _sre.SRE_Pattern or str :returns: string representation of given regexp pattern :rtype: str """ try: return regexp.pattern except AttributeError: return str(regexp) def get_cors_origins(options, request_origin): origins = options.get('origins') wildcard = r'.*' in origins # If the Origin header is not present terminate this set of steps. 
# The request is outside the scope of this specification.-- W3Spec if request_origin: LOG.debug("CORS request received with 'Origin' %s", request_origin) # If the allowed origins is an asterisk or 'wildcard', always match if wildcard and options.get('send_wildcard'): LOG.debug("Allowed origins are set to '*'. Sending wildcard CORS header.") return ['*'] # If the value of the Origin header is a case-sensitive match # for any of the values in list of origins elif try_match_any(request_origin, origins): LOG.debug("The request's Origin header matches. Sending CORS headers.", ) # Add a single Access-Control-Allow-Origin header, with either # the value of the Origin header or the string "*" as value. # -- W3Spec return [request_origin] else: LOG.debug("The request's Origin header does not match any of allowed origins.") return None elif options.get('always_send'): if wildcard: # If wildcard is in the origins, even if 'send_wildcard' is False, # simply send the wildcard. Unless supports_credentials is True, # since that is forbidded by the spec.. # It is the most-likely to be correct thing to do (the only other # option is to return nothing, which almost certainly not what # the developer wants if the '*' origin was specified. if options.get('supports_credentials'): return None else: return ['*'] else: # Return all origins that are not regexes. return sorted([o for o in origins if not probably_regex(o)]) # Terminate these steps, return the original request untouched. else: LOG.debug("The request did not contain an 'Origin' header. This means the browser or client did not request CORS, ensure the Origin Header is set.") return None def get_allow_headers(options, acl_request_headers): if acl_request_headers: request_headers = [h.strip() for h in acl_request_headers.split(',')] # any header that matches in the allow_headers matching_headers = filter( lambda h: try_match_any(h, options.get('allow_headers')), request_headers ) return ', '.join(sorted(matching_headers)) return None def get_cors_headers(options, request_headers, request_method): origins_to_set = get_cors_origins(options, request_headers.get('Origin')) headers = MultiDict() if not origins_to_set: # CORS is not enabled for this route return headers for origin in origins_to_set: headers.add(ACL_ORIGIN, origin) headers[ACL_EXPOSE_HEADERS] = options.get('expose_headers') if options.get('supports_credentials'): headers[ACL_CREDENTIALS] = 'true' # case sensative # This is a preflight request # http://www.w3.org/TR/cors/#resource-preflight-requests if request_method == 'OPTIONS': acl_request_method = request_headers.get(ACL_REQUEST_METHOD, '').upper() # If there is no Access-Control-Request-Method header or if parsing # failed, do not set any additional headers if acl_request_method and acl_request_method in options.get('methods'): # If method is not a case-sensitive match for any of the values in # list of methods do not set any additional headers and terminate # this set of steps. headers[ACL_ALLOW_HEADERS] = get_allow_headers(options, request_headers.get(ACL_REQUEST_HEADERS)) headers[ACL_MAX_AGE] = options.get('max_age') headers[ACL_METHODS] = options.get('methods') else: LOG.info("The request's Access-Control-Request-Method header does not match allowed methods. CORS headers will not be applied.") # http://www.w3.org/TR/cors/#resource-implementation if options.get('vary_header'): # Only set header if the origin returned will vary dynamically, # i.e. if we are not returning an asterisk, and there are multiple # origins that can be matched. 
if headers[ACL_ORIGIN] == '*': pass elif (len(options.get('origins')) > 1 or len(origins_to_set) > 1 or any(map(probably_regex, options.get('origins')))): headers.add('Vary', 'Origin') return MultiDict((k, v) for k, v in headers.items() if v) def set_cors_headers(resp, options): """ Performs the actual evaluation of Flas-CORS options and actually modifies the response object. This function is used both in the decorator and the after_request callback """ # If CORS has already been evaluated via the decorator, skip if hasattr(resp, FLASK_CORS_EVALUATED): LOG.debug('CORS have been already evaluated, skipping') return resp # Some libraries, like OAuthlib, set resp.headers to non Multidict # objects (Werkzeug Headers work as well). This is a problem because # headers allow repeated values. if (not isinstance(resp.headers, Headers) and not isinstance(resp.headers, MultiDict)): resp.headers = MultiDict(resp.headers) headers_to_set = get_cors_headers(options, request.headers, request.method) LOG.debug('Settings CORS headers: %s', str(headers_to_set)) for k, v in headers_to_set.items(): resp.headers.add(k, v) return resp def probably_regex(maybe_regex): if isinstance(maybe_regex, RegexObject): return True else: common_regex_chars = ['*', '\\', ']', '?', '$', '^', '[', ']', '(', ')'] # Use common characters used in regular expressions as a proxy # for if this string is in fact a regex. return any((c in maybe_regex for c in common_regex_chars)) def re_fix(reg): """ Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to enable the CORS app extension to have a more user friendly api. """ return r'.*' if reg == r'*' else reg def try_match_any(inst, patterns): return any(try_match(inst, pattern) for pattern in patterns) def try_match(request_origin, maybe_regex): """Safely attempts to match a pattern or string to a request origin.""" if isinstance(maybe_regex, RegexObject): return re.match(maybe_regex, request_origin) elif probably_regex(maybe_regex): return re.match(maybe_regex, request_origin, flags=re.IGNORECASE) else: try: return request_origin.lower() == maybe_regex.lower() except AttributeError: return request_origin == maybe_regex def get_cors_options(appInstance, *dicts): """ Compute CORS options for an application by combining the DEFAULT_OPTIONS, the app's configuration-specified options and any dictionaries passed. The last specified option wins. """ options = DEFAULT_OPTIONS.copy() options.update(get_app_kwarg_dict(appInstance)) if dicts: for d in dicts: options.update(d) return serialize_options(options) def get_app_kwarg_dict(appInstance=None): """Returns the dictionary of CORS specific app configurations.""" app = (appInstance or current_app) # In order to support blueprints which do not have a config attribute app_config = getattr(app, 'config', {}) return { k.lower().replace('cors_', ''): app_config.get(k) for k in CONFIG_OPTIONS if app_config.get(k) is not None } def flexible_str(obj): """ A more flexible str function which intelligently handles stringifying strings, lists and other iterables. The results are lexographically sorted to ensure generated responses are consistent when iterables such as Set are used. 
""" if obj is None: return None elif(not isinstance(obj, string_types) and isinstance(obj, Iterable)): return ', '.join(str(item) for item in sorted(obj)) else: return str(obj) def serialize_option(options_dict, key, upper=False): if key in options_dict: value = flexible_str(options_dict[key]) options_dict[key] = value.upper() if upper else value def ensure_iterable(inst): """ Wraps scalars or string types as a list, or returns the iterable instance. """ if isinstance(inst, string_types): return [inst] elif not isinstance(inst, Iterable): return [inst] else: return inst def sanitize_regex_param(param): return [re_fix(x) for x in ensure_iterable(param)] def serialize_options(opts): """ A helper method to serialize and processes the options dictionary. """ options = (opts or {}).copy() for key in opts.keys(): if key not in DEFAULT_OPTIONS: LOG.warning("Unknown option passed to Flask-CORS: %s", key) # Ensure origins is a list of allowed origins with at least one entry. options['origins'] = sanitize_regex_param(options.get('origins')) options['allow_headers'] = sanitize_regex_param(options.get('allow_headers')) # This is expressly forbidden by the spec. Raise a value error so people # don't get burned in production. if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']: raise ValueError("Cannot use supports_credentials in conjunction with" "an origin string of '*'. See: " "http://www.w3.org/TR/cors/#resource-requests") serialize_option(options, 'expose_headers') serialize_option(options, 'methods', upper=True) if isinstance(options.get('max_age'), timedelta): options['max_age'] = str(int(options['max_age'].total_seconds())) return options
0
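Two of the helpers above can be exercised directly. A small illustrative sketch (the paths and origins are invented) showing the longest-pattern-first sort in parse_resources and the regex heuristic behind try_match:

.. code:: python

    from flask_cors.core import parse_resources, get_regexp_pattern, try_match

    # Longer patterns sort first, so the most specific resource wins.
    pairs = parse_resources({r"/api/*": {}, r"/api/v1/users/*": {}})
    print([get_regexp_pattern(p) for p, _ in pairs])
    # -> ['/api/v1/users/*', '/api/*']

    # Strings containing regex metacharacters are matched as patterns...
    assert try_match("https://sub.example.com", r"https://.*\.example\.com")
    # ...while plain strings fall back to a case-insensitive comparison.
    assert try_match("https://Example.com", "https://example.com")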
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/flask_cors/extension.py
# -*- coding: utf-8 -*- """ extension ~~~~ Flask-CORS is a simple extension to Flask allowing you to support cross origin resource sharing (CORS) using a simple decorator. :copyright: (c) 2016 by Cory Dolphin. :license: MIT, see LICENSE for more details. """ from flask import request from .core import * try: from urllib.parse import unquote_plus except ImportError: from urllib import unquote_plus LOG = logging.getLogger(__name__) class CORS(object): """ Initializes Cross Origin Resource sharing for the application. The arguments are identical to :py:func:`cross_origin`, with the addition of a `resources` parameter. The resources parameter defines a series of regular expressions for resource paths to match and optionally, the associated options to be applied to the particular resource. These options are identical to the arguments to :py:func:`cross_origin`. The settings for CORS are determined in the following order 1. Resource level settings (e.g when passed as a dictionary) 2. Keyword argument settings 3. App level configuration settings (e.g. CORS_*) 4. Default settings Note: as it is possible for multiple regular expressions to match a resource path, the regular expressions are first sorted by length, from longest to shortest, in order to attempt to match the most specific regular expression. This allows the definition of a number of specific resource options, with a wildcard fallback for all other resources. :param resources: The series of regular expression and (optionally) associated CORS options to be applied to the given resource path. If the argument is a dictionary, it's keys must be regular expressions, and the values must be a dictionary of kwargs, identical to the kwargs of this function. If the argument is a list, it is expected to be a list of regular expressions, for which the app-wide configured options are applied. If the argument is a string, it is expected to be a regular expression for which the app-wide configured options are applied. Default : Match all and apply app-level configuration :type resources: dict, iterable or string :param origins: The origin, or list of origins to allow requests from. The origin(s) may be regular expressions, case-sensitive strings, or else an asterisk Default : '*' :type origins: list, string or regex :param methods: The method or list of methods which the allowed origins are allowed to access for non-simple requests. Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE] :type methods: list or string :param expose_headers: The header or list which are safe to expose to the API of a CORS API specification. Default : None :type expose_headers: list or string :param allow_headers: The header or list of header field names which can be used when this resource is accessed by allowed origins. The header(s) may be regular expressions, case-sensitive strings, or else an asterisk. Default : '*', allow all headers :type allow_headers: list, string or regex :param supports_credentials: Allows users to make authenticated requests. If true, injects the `Access-Control-Allow-Credentials` header in responses. This allows cookies and credentials to be submitted across domains. :note: This option cannot be used in conjunction with a '*' origin Default : False :type supports_credentials: bool :param max_age: The maximum time for which this CORS request maybe cached. This value is set as the `Access-Control-Max-Age` header. 
Default : None :type max_age: timedelta, integer, string or None :param send_wildcard: If True, and the origins parameter is `*`, a wildcard `Access-Control-Allow-Origin` header is sent, rather than the request's `Origin` header. Default : False :type send_wildcard: bool :param vary_header: If True, the header Vary: Origin will be returned as per the W3 implementation guidelines. Setting this header when the `Access-Control-Allow-Origin` is dynamically generated (e.g. when there is more than one allowed origin, and an Origin than '*' is returned) informs CDNs and other caches that the CORS headers are dynamic, and cannot be cached. If False, the Vary header will never be injected or altered. Default : True :type vary_header: bool """ def __init__(self, app=None, **kwargs): self._options = kwargs if app is not None: self.init_app(app, **kwargs) def init_app(self, app, **kwargs): # The resources and options may be specified in the App Config, the CORS constructor # or the kwargs to the call to init_app. options = get_cors_options(app, self._options, kwargs) # Flatten our resources into a list of the form # (pattern_or_regexp, dictionary_of_options) resources = parse_resources(options.get('resources')) # Compute the options for each resource by combining the options from # the app's configuration, the constructor, the kwargs to init_app, and # finally the options specified in the resources dictionary. resources = [ (pattern, get_cors_options(app, options, opts)) for (pattern, opts) in resources ] # Create a human readable form of these resources by converting the compiled # regular expressions into strings. resources_human = {get_regexp_pattern(pattern): opts for (pattern,opts) in resources} LOG.debug("Configuring CORS with resources: %s", resources_human) cors_after_request = make_after_request_function(resources) app.after_request(cors_after_request) # Wrap exception handlers with cross_origin # These error handlers will still respect the behavior of the route if options.get('intercept_exceptions', True): def _after_request_decorator(f): def wrapped_function(*args, **kwargs): return cors_after_request(app.make_response(f(*args, **kwargs))) return wrapped_function if hasattr(app, 'handle_exception'): app.handle_exception = _after_request_decorator( app.handle_exception) app.handle_user_exception = _after_request_decorator( app.handle_user_exception) def make_after_request_function(resources): def cors_after_request(resp): # If CORS headers are set in a view decorator, pass if resp.headers is not None and resp.headers.get(ACL_ORIGIN): LOG.debug('CORS have been already evaluated, skipping') return resp normalized_path = unquote_plus(request.path) for res_regex, res_options in resources: if try_match(normalized_path, res_regex): LOG.debug("Request to '%s' matches CORS resource '%s'. Using options: %s", request.path, get_regexp_pattern(res_regex), res_options) set_cors_headers(resp, res_options) break else: LOG.debug('No CORS rule matches') return resp return cors_after_request
0
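A minimal sketch of app-wide initialization using the precedence rules listed in the docstring above (the resource pattern and origin are made up):

.. code:: python

    from flask import Flask, jsonify
    from flask_cors import CORS

    app = Flask(__name__)

    # Resource-level options (1) override the kwargs (2), app config (3),
    # and defaults (4), per the documented order.
    CORS(app, resources={r"/api/*": {"origins": "https://example.com"}})

    @app.route("/api/status")
    def status():
        return jsonify(ok=True)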
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/asyncsupport.py
# -*- coding: utf-8 -*-
"""The code for async support. Importing this patches Jinja on supported
Python versions.
"""
import asyncio
import inspect
from functools import update_wrapper

from markupsafe import Markup

from .environment import TemplateModule
from .runtime import LoopContext
from .utils import concat
from .utils import internalcode
from .utils import missing


async def concat_async(async_gen):
    rv = []

    async def collect():
        async for event in async_gen:
            rv.append(event)

    await collect()
    return concat(rv)


async def generate_async(self, *args, **kwargs):
    vars = dict(*args, **kwargs)
    try:
        async for event in self.root_render_func(self.new_context(vars)):
            yield event
    except Exception:
        yield self.environment.handle_exception()


def wrap_generate_func(original_generate):
    def _convert_generator(self, loop, args, kwargs):
        async_gen = self.generate_async(*args, **kwargs)
        try:
            while 1:
                yield loop.run_until_complete(async_gen.__anext__())
        except StopAsyncIteration:
            pass

    def generate(self, *args, **kwargs):
        if not self.environment.is_async:
            return original_generate(self, *args, **kwargs)
        return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)

    return update_wrapper(generate, original_generate)


async def render_async(self, *args, **kwargs):
    if not self.environment.is_async:
        raise RuntimeError("The environment was not created with async mode enabled.")

    vars = dict(*args, **kwargs)
    ctx = self.new_context(vars)

    try:
        return await concat_async(self.root_render_func(ctx))
    except Exception:
        return self.environment.handle_exception()


def wrap_render_func(original_render):
    def render(self, *args, **kwargs):
        if not self.environment.is_async:
            return original_render(self, *args, **kwargs)
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(self.render_async(*args, **kwargs))

    return update_wrapper(render, original_render)


def wrap_block_reference_call(original_call):
    @internalcode
    async def async_call(self):
        rv = await concat_async(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            rv = Markup(rv)
        return rv

    @internalcode
    def __call__(self):
        if not self._context.environment.is_async:
            return original_call(self)
        return async_call(self)

    return update_wrapper(__call__, original_call)


def wrap_macro_invoke(original_invoke):
    @internalcode
    async def async_invoke(self, arguments, autoescape):
        rv = await self._func(*arguments)
        if autoescape:
            rv = Markup(rv)
        return rv

    @internalcode
    def _invoke(self, arguments, autoescape):
        if not self._environment.is_async:
            return original_invoke(self, arguments, autoescape)
        return async_invoke(self, arguments, autoescape)

    return update_wrapper(_invoke, original_invoke)


@internalcode
async def get_default_module_async(self):
    if self._module is not None:
        return self._module
    self._module = rv = await self.make_module_async()
    return rv


def wrap_default_module(original_default_module):
    @internalcode
    def _get_default_module(self):
        if self.environment.is_async:
            raise RuntimeError("Template module attribute is unavailable in async mode")
        return original_default_module(self)

    return _get_default_module


async def make_module_async(self, vars=None, shared=False, locals=None):
    context = self.new_context(vars, shared, locals)
    body_stream = []
    async for item in self.root_render_func(context):
        body_stream.append(item)
    return TemplateModule(self, context, body_stream)


def patch_template():
    from . import Template

    Template.generate = wrap_generate_func(Template.generate)
    Template.generate_async = update_wrapper(generate_async, Template.generate_async)
    Template.render_async = update_wrapper(render_async, Template.render_async)
    Template.render = wrap_render_func(Template.render)
    Template._get_default_module = wrap_default_module(Template._get_default_module)
    Template._get_default_module_async = get_default_module_async
    Template.make_module_async = update_wrapper(
        make_module_async, Template.make_module_async
    )


def patch_runtime():
    from .runtime import BlockReference, Macro

    BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
    Macro._invoke = wrap_macro_invoke(Macro._invoke)


def patch_filters():
    from .filters import FILTERS
    from .asyncfilters import ASYNC_FILTERS

    FILTERS.update(ASYNC_FILTERS)


def patch_all():
    patch_template()
    patch_runtime()
    patch_filters()


async def auto_await(value):
    if inspect.isawaitable(value):
        return await value
    return value


async def auto_aiter(iterable):
    if hasattr(iterable, "__aiter__"):
        async for item in iterable:
            yield item
        return
    for item in iterable:
        yield item


class AsyncLoopContext(LoopContext):
    _to_iterator = staticmethod(auto_aiter)

    @property
    async def length(self):
        if self._length is not None:
            return self._length

        try:
            self._length = len(self._iterable)
        except TypeError:
            iterable = [x async for x in self._iterator]
            self._iterator = self._to_iterator(iterable)
            self._length = len(iterable) + self.index + (self._after is not missing)

        return self._length

    @property
    async def revindex0(self):
        return await self.length - self.index

    @property
    async def revindex(self):
        return await self.length - self.index0

    async def _peek_next(self):
        if self._after is not missing:
            return self._after

        try:
            self._after = await self._iterator.__anext__()
        except StopAsyncIteration:
            self._after = missing

        return self._after

    @property
    async def last(self):
        return await self._peek_next() is missing

    @property
    async def nextitem(self):
        rv = await self._peek_next()

        if rv is missing:
            return self._undefined("there is no next item")

        return rv

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._after is not missing:
            rv = self._after
            self._after = missing
        else:
            rv = await self._iterator.__anext__()

        self.index0 += 1
        self._before = self._current
        self._current = rv
        return rv, self


async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
    import warnings

    warnings.warn(
        "This template must be recompiled with at least Jinja 2.11, or"
        " it will fail in 3.0.",
        DeprecationWarning,
        stacklevel=2,
    )
    return AsyncLoopContext(iterable, undefined, recurse, depth0)


patch_all()
0
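A minimal usage sketch (not part of the dumped file) of what asyncsupport.py enables: creating an environment with enable_async=True imports this module and patches Template with the render_async coroutine shown above. The environment and template names below are illustrative; it assumes Jinja2 2.11 on Python 3.6+.

import asyncio

from jinja2 import Environment

env = Environment(enable_async=True)  # importing asyncsupport happens here
template = env.from_string("Hello {{ name }}!")

async def main():
    # render_async is one of the methods patched onto Template above
    print(await template.render_async(name="async world"))

asyncio.get_event_loop().run_until_complete(main())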
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/compiler.py
# -*- coding: utf-8 -*-
"""Compiles nodes from the parser into Python code."""
from collections import namedtuple
from functools import update_wrapper
from itertools import chain
from keyword import iskeyword as is_python_keyword

from markupsafe import escape
from markupsafe import Markup

from . import nodes
from ._compat import imap
from ._compat import iteritems
from ._compat import izip
from ._compat import NativeStringIO
from ._compat import range_type
from ._compat import string_types
from ._compat import text_type
from .exceptions import TemplateAssertionError
from .idtracking import Symbols
from .idtracking import VAR_LOAD_ALIAS
from .idtracking import VAR_LOAD_PARAMETER
from .idtracking import VAR_LOAD_RESOLVE
from .idtracking import VAR_LOAD_UNDEFINED
from .nodes import EvalContext
from .optimizer import Optimizer
from .utils import concat
from .visitor import NodeVisitor

operators = {
    "eq": "==",
    "ne": "!=",
    "gt": ">",
    "gteq": ">=",
    "lt": "<",
    "lteq": "<=",
    "in": "in",
    "notin": "not in",
}

# what method to iterate over items do we want to use for dict iteration
# in generated code?  on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, "iteritems"):
    dict_item_iter = "iteritems"
else:
    dict_item_iter = "items"

code_features = ["division"]

# does this python version support generator stops? (PEP 0479)
try:
    exec("from __future__ import generator_stop")
    code_features.append("generator_stop")
except SyntaxError:
    pass

# does this python version support yield from?
try:
    exec("def f(): yield from x()")
except SyntaxError:
    supports_yield_from = False
else:
    supports_yield_from = True


def optimizeconst(f):
    def new_func(self, node, frame, **kwargs):
        # Only optimize if the frame is not volatile
        if self.optimized and not frame.eval_ctx.volatile:
            new_node = self.optimizer.visit(node, frame.eval_ctx)

            if new_node != node:
                return self.visit(new_node, frame)

        return f(self, node, frame, **kwargs)

    return update_wrapper(new_func, f)


def generate(
    node, environment, name, filename, stream=None, defer_init=False, optimized=True
):
    """Generate the python source for a node tree."""
    if not isinstance(node, nodes.Template):
        raise TypeError("Can't compile non template nodes")

    generator = environment.code_generator_class(
        environment, name, filename, stream, defer_init, optimized
    )
    generator.visit(node)

    if stream is None:
        return generator.stream.getvalue()


def has_safe_repr(value):
    """Does the node have a safe representation?"""
    if value is None or value is NotImplemented or value is Ellipsis:
        return True

    if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
        return True

    if type(value) in (tuple, list, set, frozenset):
        for item in value:
            if not has_safe_repr(item):
                return False

        return True
    elif type(value) is dict:
        for key, value in iteritems(value):
            if not has_safe_repr(key):
                return False

            if not has_safe_repr(value):
                return False

        return True

    return False


def find_undeclared(nodes, names):
    """Check if the names passed are accessed undeclared.  The return value
    is a set of all the undeclared names from the sequence of names found.
""" visitor = UndeclaredNameVisitor(names) try: for node in nodes: visitor.visit(node) except VisitorExit: pass return visitor.undeclared class MacroRef(object): def __init__(self, node): self.node = node self.accesses_caller = False self.accesses_kwargs = False self.accesses_varargs = False class Frame(object): """Holds compile time information for us.""" def __init__(self, eval_ctx, parent=None, level=None): self.eval_ctx = eval_ctx self.symbols = Symbols(parent and parent.symbols or None, level=level) # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False # the root frame is basically just the outermost frame, so no if # conditions. This information is used to optimize inheritance # situations. self.rootlevel = False # in some dynamic inheritance situations the compiler needs to add # write tests around output statements. self.require_output_check = parent and parent.require_output_check # inside some tags we are using a buffer rather than yield statements. # this for example affects {% filter %} or {% macro %}. If a frame # is buffered this variable points to the name of the list used as # buffer. self.buffer = None # the name of the block we're in, otherwise None. self.block = parent and parent.block or None # the parent of this frame self.parent = parent if parent is not None: self.buffer = parent.buffer def copy(self): """Create a copy of the current one.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.symbols = self.symbols.copy() return rv def inner(self, isolated=False): """Return an inner frame.""" if isolated: return Frame(self.eval_ctx, level=self.symbols.level + 1) return Frame(self.eval_ctx, self) def soft(self): """Return a soft frame. A soft frame may not be modified as standalone thing as it shares the resources with the frame it was created of, but it's not a rootlevel frame any longer. This is only used to implement if-statements. """ rv = self.copy() rv.rootlevel = False return rv __copy__ = copy class VisitorExit(RuntimeError): """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" class DependencyFinderVisitor(NodeVisitor): """A visitor that collects filter and test calls.""" def __init__(self): self.filters = set() self.tests = set() def visit_Filter(self, node): self.generic_visit(node) self.filters.add(node.name) def visit_Test(self, node): self.generic_visit(node) self.tests.add(node.name) def visit_Block(self, node): """Stop visiting at blocks.""" class UndeclaredNameVisitor(NodeVisitor): """A visitor that checks if a name is accessed without being declared. This is different from the frame visitor as it will not stop at closure frames. """ def __init__(self, names): self.names = set(names) self.undeclared = set() def visit_Name(self, node): if node.ctx == "load" and node.name in self.names: self.undeclared.add(node.name) if self.undeclared == self.names: raise VisitorExit() else: self.names.discard(node.name) def visit_Block(self, node): """Stop visiting a blocks.""" class CompilerExit(Exception): """Raised if the compiler encountered a situation where it just doesn't make sense to further process the code. Any block that raises such an exception is not further processed. 
""" class CodeGenerator(NodeVisitor): def __init__( self, environment, name, filename, stream=None, defer_init=False, optimized=True ): if stream is None: stream = NativeStringIO() self.environment = environment self.name = name self.filename = filename self.stream = stream self.created_block_context = False self.defer_init = defer_init self.optimized = optimized if optimized: self.optimizer = Optimizer(environment) # aliases for imports self.import_aliases = {} # a registry for all blocks. Because blocks are moved out # into the global python scope they are registered here self.blocks = {} # the number of extends statements so far self.extends_so_far = 0 # some templates have a rootlevel extends. In this case we # can safely assume that we're a child template and do some # more optimizations. self.has_known_extends = False # the current line number self.code_lineno = 1 # registry of all filters and tests (global, not block local) self.tests = {} self.filters = {} # the debug information self.debug_info = [] self._write_debug_info = None # the number of new lines before the next write() self._new_lines = 0 # the line number of the last written statement self._last_line = 0 # true if nothing was written so far. self._first_write = True # used by the `temporary_identifier` method to get new # unique, temporary identifier self._last_identifier = 0 # the current indentation self._indentation = 0 # Tracks toplevel assignments self._assign_stack = [] # Tracks parameter definition blocks self._param_def_block = [] # Tracks the current context. self._context_reference_stack = ["context"] # -- Various compilation helpers def fail(self, msg, lineno): """Fail with a :exc:`TemplateAssertionError`.""" raise TemplateAssertionError(msg, lineno, self.name, self.filename) def temporary_identifier(self): """Get a new unique identifier.""" self._last_identifier += 1 return "t_%d" % self._last_identifier def buffer(self, frame): """Enable buffering for the frame from that point onwards.""" frame.buffer = self.temporary_identifier() self.writeline("%s = []" % frame.buffer) def return_buffer_contents(self, frame, force_unescaped=False): """Return the buffer contents of the frame.""" if not force_unescaped: if frame.eval_ctx.volatile: self.writeline("if context.eval_ctx.autoescape:") self.indent() self.writeline("return Markup(concat(%s))" % frame.buffer) self.outdent() self.writeline("else:") self.indent() self.writeline("return concat(%s)" % frame.buffer) self.outdent() return elif frame.eval_ctx.autoescape: self.writeline("return Markup(concat(%s))" % frame.buffer) return self.writeline("return concat(%s)" % frame.buffer) def indent(self): """Indent by one.""" self._indentation += 1 def outdent(self, step=1): """Outdent by step.""" self._indentation -= step def start_write(self, frame, node=None): """Yield or write into the frame buffer.""" if frame.buffer is None: self.writeline("yield ", node) else: self.writeline("%s.append(" % frame.buffer, node) def end_write(self, frame): """End the writing process started by `start_write`.""" if frame.buffer is not None: self.write(")") def simple_write(self, s, frame, node=None): """Simple shortcut for start_write + write + end_write.""" self.start_write(frame, node) self.write(s) self.end_write(frame) def blockvisit(self, nodes, frame): """Visit a list of nodes as block in a frame. If the current frame is no buffer a dummy ``if 0: yield None`` is written automatically. 
""" try: self.writeline("pass") for node in nodes: self.visit(node, frame) except CompilerExit: pass def write(self, x): """Write a string into the output stream.""" if self._new_lines: if not self._first_write: self.stream.write("\n" * self._new_lines) self.code_lineno += self._new_lines if self._write_debug_info is not None: self.debug_info.append((self._write_debug_info, self.code_lineno)) self._write_debug_info = None self._first_write = False self.stream.write(" " * self._indentation) self._new_lines = 0 self.stream.write(x) def writeline(self, x, node=None, extra=0): """Combination of newline and write.""" self.newline(node, extra) self.write(x) def newline(self, node=None, extra=0): """Add one or more newlines before the next write.""" self._new_lines = max(self._new_lines, 1 + extra) if node is not None and node.lineno != self._last_line: self._write_debug_info = node.lineno self._last_line = node.lineno def signature(self, node, frame, extra_kwargs=None): """Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as python dict. """ # if any of the given keyword arguments is a python keyword # we have to make sure that no invalid call is created. kwarg_workaround = False for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): if is_python_keyword(kwarg): kwarg_workaround = True break for arg in node.args: self.write(", ") self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: self.write(", ") self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write(", %s=%s" % (key, value)) if node.dyn_args: self.write(", *") self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: self.write(", **dict({") else: self.write(", **{") for kwarg in node.kwargs: self.write("%r: " % kwarg.key) self.visit(kwarg.value, frame) self.write(", ") if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write("%r: %s, " % (key, value)) if node.dyn_kwargs is not None: self.write("}, **") self.visit(node.dyn_kwargs, frame) self.write(")") else: self.write("}") elif node.dyn_kwargs is not None: self.write(", **") self.visit(node.dyn_kwargs, frame) def pull_dependencies(self, nodes): """Pull all the dependencies.""" visitor = DependencyFinderVisitor() for node in nodes: visitor.visit(node) for dependency in "filters", "tests": mapping = getattr(self, dependency) for name in getattr(visitor, dependency): if name not in mapping: mapping[name] = self.temporary_identifier() self.writeline( "%s = environment.%s[%r]" % (mapping[name], dependency, name) ) def enter_frame(self, frame): undefs = [] for target, (action, param) in iteritems(frame.symbols.loads): if action == VAR_LOAD_PARAMETER: pass elif action == VAR_LOAD_RESOLVE: self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param)) elif action == VAR_LOAD_ALIAS: self.writeline("%s = %s" % (target, param)) elif action == VAR_LOAD_UNDEFINED: undefs.append(target) else: raise NotImplementedError("unknown load instruction") if undefs: self.writeline("%s = missing" % " = ".join(undefs)) def leave_frame(self, frame, with_python_scope=False): if not with_python_scope: undefs = [] for target, _ in iteritems(frame.symbols.loads): undefs.append(target) if undefs: self.writeline("%s = missing" % " = ".join(undefs)) def func(self, name): if 
self.environment.is_async: return "async def %s" % name return "def %s" % name def macro_body(self, node, frame): """Dump the function def of a macro or call block.""" frame = frame.inner() frame.symbols.analyze_node(node) macro_ref = MacroRef(node) explicit_caller = None skip_special_params = set() args = [] for idx, arg in enumerate(node.args): if arg.name == "caller": explicit_caller = idx if arg.name in ("kwargs", "varargs"): skip_special_params.add(arg.name) args.append(frame.symbols.ref(arg.name)) undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) if "caller" in undeclared: # In older Jinja versions there was a bug that allowed caller # to retain the special behavior even if it was mentioned in # the argument list. However thankfully this was only really # working if it was the last argument. So we are explicitly # checking this now and error out if it is anywhere else in # the argument list. if explicit_caller is not None: try: node.defaults[explicit_caller - len(node.args)] except IndexError: self.fail( "When defining macros or call blocks the " 'special "caller" argument must be omitted ' "or be given a default.", node.lineno, ) else: args.append(frame.symbols.declare_parameter("caller")) macro_ref.accesses_caller = True if "kwargs" in undeclared and "kwargs" not in skip_special_params: args.append(frame.symbols.declare_parameter("kwargs")) macro_ref.accesses_kwargs = True if "varargs" in undeclared and "varargs" not in skip_special_params: args.append(frame.symbols.declare_parameter("varargs")) macro_ref.accesses_varargs = True # macros are delayed, they never require output checks frame.require_output_check = False frame.symbols.analyze_node(node) self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node) self.indent() self.buffer(frame) self.enter_frame(frame) self.push_parameter_definitions(frame) for idx, arg in enumerate(node.args): ref = frame.symbols.ref(arg.name) self.writeline("if %s is missing:" % ref) self.indent() try: default = node.defaults[idx - len(node.args)] except IndexError: self.writeline( "%s = undefined(%r, name=%r)" % (ref, "parameter %r was not provided" % arg.name, arg.name) ) else: self.writeline("%s = " % ref) self.visit(default, frame) self.mark_parameter_stored(ref) self.outdent() self.pop_parameter_definitions() self.blockvisit(node.body, frame) self.return_buffer_contents(frame, force_unescaped=True) self.leave_frame(frame, with_python_scope=True) self.outdent() return frame, macro_ref def macro_def(self, macro_ref, frame): """Dump the macro definition for the def created by macro_body.""" arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) name = getattr(macro_ref.node, "name", None) if len(macro_ref.node.args) == 1: arg_tuple += "," self.write( "Macro(environment, macro, %r, (%s), %r, %r, %r, " "context.eval_ctx.autoescape)" % ( name, arg_tuple, macro_ref.accesses_kwargs, macro_ref.accesses_varargs, macro_ref.accesses_caller, ) ) def position(self, node): """Return a human readable position for the node.""" rv = "line %d" % node.lineno if self.name is not None: rv += " in " + repr(self.name) return rv def dump_local_context(self, frame): return "{%s}" % ", ".join( "%r: %s" % (name, target) for name, target in iteritems(frame.symbols.dump_stores()) ) def write_commons(self): """Writes a common preamble that is used by root and block functions. Primarily this sets up common local helpers and enforces a generator through a dead branch. 
""" self.writeline("resolve = context.resolve_or_missing") self.writeline("undefined = environment.undefined") # always use the standard Undefined class for the implicit else of # conditional expressions self.writeline("cond_expr_undefined = Undefined") self.writeline("if 0: yield None") def push_parameter_definitions(self, frame): """Pushes all parameter targets from the given frame into a local stack that permits tracking of yet to be assigned parameters. In particular this enables the optimization from `visit_Name` to skip undefined expressions for parameters in macros as macros can reference otherwise unbound parameters. """ self._param_def_block.append(frame.symbols.dump_param_targets()) def pop_parameter_definitions(self): """Pops the current parameter definitions set.""" self._param_def_block.pop() def mark_parameter_stored(self, target): """Marks a parameter in the current parameter definitions as stored. This will skip the enforced undefined checks. """ if self._param_def_block: self._param_def_block[-1].discard(target) def push_context_reference(self, target): self._context_reference_stack.append(target) def pop_context_reference(self): self._context_reference_stack.pop() def get_context_ref(self): return self._context_reference_stack[-1] def get_resolve_func(self): target = self._context_reference_stack[-1] if target == "context": return "resolve" return "%s.resolve" % target def derive_context(self, frame): return "%s.derived(%s)" % ( self.get_context_ref(), self.dump_local_context(frame), ) def parameter_is_undeclared(self, target): """Checks if a given target is an undeclared parameter.""" if not self._param_def_block: return False return target in self._param_def_block[-1] def push_assign_tracking(self): """Pushes a new layer for assignment tracking.""" self._assign_stack.append(set()) def pop_assign_tracking(self, frame): """Pops the topmost level for assignment tracking and updates the context variables if necessary. """ vars = self._assign_stack.pop() if not frame.toplevel or not vars: return public_names = [x for x in vars if x[:1] != "_"] if len(vars) == 1: name = next(iter(vars)) ref = frame.symbols.ref(name) self.writeline("context.vars[%r] = %s" % (name, ref)) else: self.writeline("context.vars.update({") for idx, name in enumerate(vars): if idx: self.write(", ") ref = frame.symbols.ref(name) self.write("%r: %s" % (name, ref)) self.write("})") if public_names: if len(public_names) == 1: self.writeline("context.exported_vars.add(%r)" % public_names[0]) else: self.writeline( "context.exported_vars.update((%s))" % ", ".join(imap(repr, public_names)) ) # -- Statement Visitors def visit_Template(self, node, frame=None): assert frame is None, "no root frame allowed" eval_ctx = EvalContext(self.environment, self.name) from .runtime import exported self.writeline("from __future__ import %s" % ", ".join(code_features)) self.writeline("from jinja2.runtime import " + ", ".join(exported)) if self.environment.is_async: self.writeline( "from jinja2.asyncsupport import auto_await, " "auto_aiter, AsyncLoopContext" ) # if we want a deferred initialization we cannot move the # environment into a local name envenv = not self.defer_init and ", environment=environment" or "" # do we have an extends tag at all? If not, we can save some # overhead by just not processing any inheritance code. 
        have_extends = node.find(nodes.Extends) is not None

        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail("block %r defined twice" % block.name, block.lineno)
            self.blocks[block.name] = block

        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if "." in imp:
                    module, obj = imp.rsplit(".", 1)
                    self.writeline("from %s import %s as %s" % (module, obj, alias))
                else:
                    self.writeline("import %s as %s" % (imp, alias))

        # add the load name
        self.writeline("name = %r" % self.name)

        # generate the root render function.
        self.writeline(
            "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
        )
        self.indent()
        self.write_commons()

        # process the root
        frame = Frame(eval_ctx)
        if "self" in find_undeclared(node.body, ("self",)):
            ref = frame.symbols.declare_parameter("self")
            self.writeline("%s = TemplateReference(context)" % ref)
        frame.symbols.analyze_node(node)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        if have_extends:
            self.writeline("parent_template = None")
        self.enter_frame(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.leave_frame(frame, with_python_scope=True)
        self.outdent()

        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
                self.writeline("if parent_template is not None:")
            self.indent()
            if supports_yield_from and not self.environment.is_async:
                self.writeline("yield from parent_template.root_render_func(context)")
            else:
                self.writeline(
                    "%sfor event in parent_template."
                    "root_render_func(context):"
                    % (self.environment.is_async and "async " or "")
                )
                self.indent()
                self.writeline("yield event")
                self.outdent()
            self.outdent(1 + (not self.has_known_extends))

        # at this point we now have the blocks collected and can visit them too.
        for name, block in iteritems(self.blocks):
            self.writeline(
                "%s(context, missing=missing%s):" % (self.func("block_" + name), envenv),
                block,
                1,
            )
            self.indent()
            self.write_commons()
            # It's important that we do not make this frame a child of the
            # toplevel template.  This would cause a variety of
            # interesting issues with identifier tracking.
            block_frame = Frame(eval_ctx)
            undeclared = find_undeclared(block.body, ("self", "super"))
            if "self" in undeclared:
                ref = block_frame.symbols.declare_parameter("self")
                self.writeline("%s = TemplateReference(context)" % ref)
            if "super" in undeclared:
                ref = block_frame.symbols.declare_parameter("super")
                self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
            block_frame.symbols.analyze_node(block)
            block_frame.block = name
            self.enter_frame(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.leave_frame(block_frame, with_python_scope=True)
            self.outdent()

        self.writeline(
            "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
            extra=1,
        )

        # add a function that returns the debug info
        self.writeline(
            "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
        )

    def visit_Block(self, node, frame):
        """Call a block and register it for the template."""
        level = 0
        if frame.toplevel:
            # if we know that we are a child template, there is no need to
            # check if we are one
            if self.has_known_extends:
                return
            if self.extends_so_far > 0:
                self.writeline("if parent_template is None:")
                self.indent()
                level += 1

        if node.scoped:
            context = self.derive_context(frame)
        else:
            context = self.get_context_ref()

        if (
            supports_yield_from
            and not self.environment.is_async
            and frame.buffer is None
        ):
            self.writeline(
                "yield from context.blocks[%r][0](%s)" % (node.name, context), node
            )
        else:
            loop = self.environment.is_async and "async for" or "for"
            self.writeline(
                "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
                node,
            )
            self.indent()
            self.simple_write("event", frame)
            self.outdent()

        self.outdent(level)

    def visit_Extends(self, node, frame):
        """Calls the extender."""
        if not frame.toplevel:
            self.fail("cannot use extend from a non top-level scope", node.lineno)

        # if the number of extends statements in general is zero so
        # far, we don't have to add a check if something extended
        # the template before this one.
        if self.extends_so_far > 0:

            # if we have a known extends we just add a template runtime
            # error into the generated code.  We could catch that at compile
            # time too, but I'd rather not confuse users by throwing the
            # same error at different times just "because we can".
            if not self.has_known_extends:
                self.writeline("if parent_template is not None:")
                self.indent()
            self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")

            # if we have a known extends already we don't need that code here
            # as we know that the template execution will end here.
            if self.has_known_extends:
                raise CompilerExit()
            else:
                self.outdent()

        self.writeline("parent_template = environment.get_template(", node)
        self.visit(node.template, frame)
        self.write(", %r)" % self.name)
        self.writeline(
            "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
        )
        self.indent()
        self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
        self.outdent()

        # if this extends statement was in the root level we can take
        # advantage of that information and simplify the generated code
        # in the top level from this point onwards
        if frame.rootlevel:
            self.has_known_extends = True

        # and now we have one more
        self.extends_so_far += 1

    def visit_Include(self, node, frame):
        """Handles includes."""
        if node.ignore_missing:
            self.writeline("try:")
            self.indent()

        func_name = "get_or_select_template"
        if isinstance(node.template, nodes.Const):
            if isinstance(node.template.value, string_types):
                func_name = "get_template"
            elif isinstance(node.template.value, (tuple, list)):
                func_name = "select_template"
        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
            func_name = "select_template"

        self.writeline("template = environment.%s(" % func_name, node)
        self.visit(node.template, frame)
        self.write(", %r)" % self.name)
        if node.ignore_missing:
            self.outdent()
            self.writeline("except TemplateNotFound:")
            self.indent()
            self.writeline("pass")
            self.outdent()
            self.writeline("else:")
            self.indent()

        skip_event_yield = False
        if node.with_context:
            loop = self.environment.is_async and "async for" or "for"
            self.writeline(
                "%s event in template.root_render_func("
                "template.new_context(context.get_all(), True, "
                "%s)):" % (loop, self.dump_local_context(frame))
            )
        elif self.environment.is_async:
            self.writeline(
                "for event in (await "
                "template._get_default_module_async())"
                "._body_stream:"
            )
        else:
            if supports_yield_from:
                self.writeline("yield from template._get_default_module()._body_stream")
                skip_event_yield = True
            else:
                self.writeline(
                    "for event in template._get_default_module()._body_stream:"
                )

        if not skip_event_yield:
            self.indent()
            self.simple_write("event", frame)
            self.outdent()

        if node.ignore_missing:
            self.outdent()

    def visit_Import(self, node, frame):
        """Visit regular imports."""
        self.writeline("%s = " % frame.symbols.ref(node.target), node)
        if frame.toplevel:
            self.write("context.vars[%r] = " % node.target)
        if self.environment.is_async:
            self.write("await ")
        self.write("environment.get_template(")
        self.visit(node.template, frame)
        self.write(", %r)." % self.name)
        if node.with_context:
            self.write(
                "make_module%s(context.get_all(), True, %s)"
                % (
                    self.environment.is_async and "_async" or "",
                    self.dump_local_context(frame),
                )
            )
        elif self.environment.is_async:
            self.write("_get_default_module_async()")
        else:
            self.write("_get_default_module()")
        if frame.toplevel and not node.target.startswith("_"):
            self.writeline("context.exported_vars.discard(%r)" % node.target)

    def visit_FromImport(self, node, frame):
        """Visit named imports."""
        self.newline(node)
        self.write(
            "included_template = %senvironment.get_template("
            % (self.environment.is_async and "await " or "")
        )
        self.visit(node.template, frame)
        self.write(", %r)." % self.name)
        if node.with_context:
            self.write(
                "make_module%s(context.get_all(), True, %s)"
                % (
                    self.environment.is_async and "_async" or "",
                    self.dump_local_context(frame),
                )
            )
        elif self.environment.is_async:
            self.write("_get_default_module_async()")
        else:
            self.write("_get_default_module()")

        var_names = []
        discarded_names = []
        for name in node.names:
            if isinstance(name, tuple):
                name, alias = name
            else:
                alias = name
            self.writeline(
                "%s = getattr(included_template, " "%r, missing)"
                % (frame.symbols.ref(alias), name)
            )
            self.writeline("if %s is missing:" % frame.symbols.ref(alias))
            self.indent()
            self.writeline(
                "%s = undefined(%r %% "
                "included_template.__name__, "
                "name=%r)"
                % (
                    frame.symbols.ref(alias),
                    "the template %%r (imported on %s) does "
                    "not export the requested name %s"
                    % (self.position(node), repr(name)),
                    name,
                )
            )
            self.outdent()
            if frame.toplevel:
                var_names.append(alias)
                if not alias.startswith("_"):
                    discarded_names.append(alias)

        if var_names:
            if len(var_names) == 1:
                name = var_names[0]
                self.writeline(
                    "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
                )
            else:
                self.writeline(
                    "context.vars.update({%s})"
                    % ", ".join(
                        "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
                    )
                )
        if discarded_names:
            if len(discarded_names) == 1:
                self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
            else:
                self.writeline(
                    "context.exported_vars.difference_"
                    "update((%s))" % ", ".join(imap(repr, discarded_names))
                )

    def visit_For(self, node, frame):
        loop_frame = frame.inner()
        test_frame = frame.inner()
        else_frame = frame.inner()

        # try to figure out if we have an extended loop.  An extended loop
        # is necessary if the loop is in recursive mode or if the special
        # loop variable is accessed in the body.
        extended_loop = node.recursive or "loop" in find_undeclared(
            node.iter_child_nodes(only=("body",)), ("loop",)
        )

        loop_ref = None
        if extended_loop:
            loop_ref = loop_frame.symbols.declare_parameter("loop")

        loop_frame.symbols.analyze_node(node, for_branch="body")
        if node.else_:
            else_frame.symbols.analyze_node(node, for_branch="else")

        if node.test:
            loop_filter_func = self.temporary_identifier()
            test_frame.symbols.analyze_node(node, for_branch="test")
            self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
            self.indent()
            self.enter_frame(test_frame)
            self.writeline(self.environment.is_async and "async for " or "for ")
            self.visit(node.target, loop_frame)
            self.write(" in ")
            self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
            self.write(":")
            self.indent()
            self.writeline("if ", node.test)
            self.visit(node.test, test_frame)
            self.write(":")
            self.indent()
            self.writeline("yield ")
            self.visit(node.target, loop_frame)
            self.outdent(3)
            self.leave_frame(test_frame, with_python_scope=True)

        # if we don't have a recursive loop we have to find the shadowed
        # variables at that point.  Because loops can be nested but the loop
        # variable is a special one we have to enforce aliasing for it.
        if node.recursive:
            self.writeline(
                "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
            )
            self.indent()
            self.buffer(loop_frame)

            # Use the same buffer for the else frame
            else_frame.buffer = loop_frame.buffer

        # make sure the loop variable is a special one and raise a template
        # assertion error if a loop tries to write to loop
        if extended_loop:
            self.writeline("%s = missing" % loop_ref)

        for name in node.find_all(nodes.Name):
            if name.ctx == "store" and name.name == "loop":
                self.fail(
                    "Can't assign to special loop variable in for-loop target",
                    name.lineno,
                )

        if node.else_:
            iteration_indicator = self.temporary_identifier()
            self.writeline("%s = 1" % iteration_indicator)

        self.writeline(self.environment.is_async and "async for " or "for ", node)
        self.visit(node.target, loop_frame)
        if extended_loop:
            if self.environment.is_async:
                self.write(", %s in AsyncLoopContext(" % loop_ref)
            else:
                self.write(", %s in LoopContext(" % loop_ref)
        else:
            self.write(" in ")

        if node.test:
            self.write("%s(" % loop_filter_func)
        if node.recursive:
            self.write("reciter")
        else:
            if self.environment.is_async and not extended_loop:
                self.write("auto_aiter(")
            self.visit(node.iter, frame)
            if self.environment.is_async and not extended_loop:
                self.write(")")
        if node.test:
            self.write(")")

        if node.recursive:
            self.write(", undefined, loop_render_func, depth):")
        else:
            self.write(extended_loop and ", undefined):" or ":")

        self.indent()
        self.enter_frame(loop_frame)

        self.blockvisit(node.body, loop_frame)
        if node.else_:
            self.writeline("%s = 0" % iteration_indicator)
        self.outdent()
        self.leave_frame(
            loop_frame, with_python_scope=node.recursive and not node.else_
        )

        if node.else_:
            self.writeline("if %s:" % iteration_indicator)
            self.indent()
            self.enter_frame(else_frame)
            self.blockvisit(node.else_, else_frame)
            self.leave_frame(else_frame)
            self.outdent()

        # if the node was recursive we have to return the buffer contents
        # and start the iteration code
        if node.recursive:
            self.return_buffer_contents(loop_frame)
            self.outdent()
            self.start_write(frame, node)
            if self.environment.is_async:
                self.write("await ")
            self.write("loop(")
            if self.environment.is_async:
                self.write("auto_aiter(")
            self.visit(node.iter, frame)
            if self.environment.is_async:
                self.write(")")
            self.write(", loop)")
            self.end_write(frame)

    def visit_If(self, node, frame):
        if_frame = frame.soft()
        self.writeline("if ", node)
        self.visit(node.test, if_frame)
        self.write(":")
        self.indent()
        self.blockvisit(node.body, if_frame)
        self.outdent()
        for elif_ in node.elif_:
            self.writeline("elif ", elif_)
            self.visit(elif_.test, if_frame)
            self.write(":")
            self.indent()
            self.blockvisit(elif_.body, if_frame)
            self.outdent()
        if node.else_:
            self.writeline("else:")
            self.indent()
            self.blockvisit(node.else_, if_frame)
            self.outdent()

    def visit_Macro(self, node, frame):
        macro_frame, macro_ref = self.macro_body(node, frame)
        self.newline()
        if frame.toplevel:
            if not node.name.startswith("_"):
                self.write("context.exported_vars.add(%r)" % node.name)
            self.writeline("context.vars[%r] = " % node.name)
        self.write("%s = " % frame.symbols.ref(node.name))
        self.macro_def(macro_ref, macro_frame)

    def visit_CallBlock(self, node, frame):
        call_frame, macro_ref = self.macro_body(node, frame)
        self.writeline("caller = ")
        self.macro_def(macro_ref, call_frame)
        self.start_write(frame, node)
        self.visit_Call(node.call, frame, forward_caller=True)
        self.end_write(frame)

    def visit_FilterBlock(self, node, frame):
        filter_frame = frame.inner()
        filter_frame.symbols.analyze_node(node)
        self.enter_frame(filter_frame)
        self.buffer(filter_frame)
        self.blockvisit(node.body, filter_frame)
        self.start_write(frame, node)
        self.visit_Filter(node.filter, filter_frame)
        self.end_write(frame)
        self.leave_frame(filter_frame)

    def visit_With(self, node, frame):
        with_frame = frame.inner()
        with_frame.symbols.analyze_node(node)
        self.enter_frame(with_frame)
        for target, expr in izip(node.targets, node.values):
            self.newline()
            self.visit(target, with_frame)
            self.write(" = ")
            self.visit(expr, frame)
        self.blockvisit(node.body, with_frame)
        self.leave_frame(with_frame)

    def visit_ExprStmt(self, node, frame):
        self.newline(node)
        self.visit(node.node, frame)

    _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
    #: The default finalize function if the environment isn't configured
    #: with one. Or if the environment has one, this is called on that
    #: function's output for constants.
    _default_finalize = text_type
    _finalize = None

    def _make_finalize(self):
        """Build the finalize function to be used on constants and at
        runtime. Cached so it's only created once for all output nodes.

        Returns a ``namedtuple`` with the following attributes:

        ``const``
            A function to finalize constant data at compile time.

        ``src``
            Source code to output around nodes to be evaluated at
            runtime.
        """
        if self._finalize is not None:
            return self._finalize

        finalize = default = self._default_finalize
        src = None

        if self.environment.finalize:
            src = "environment.finalize("
            env_finalize = self.environment.finalize

            def finalize(value):
                return default(env_finalize(value))

            if getattr(env_finalize, "contextfunction", False) is True:
                src += "context, "
                finalize = None  # noqa: F811
            elif getattr(env_finalize, "evalcontextfunction", False) is True:
                src += "context.eval_ctx, "
                finalize = None
            elif getattr(env_finalize, "environmentfunction", False) is True:
                src += "environment, "

                def finalize(value):
                    return default(env_finalize(self.environment, value))

        self._finalize = self._FinalizeInfo(finalize, src)
        return self._finalize

    def _output_const_repr(self, group):
        """Given a group of constant values converted from ``Output``
        child nodes, produce a string to write to the template module
        source.
        """
        return repr(concat(group))

    def _output_child_to_const(self, node, frame, finalize):
        """Try to optimize a child of an ``Output`` node by trying to
        convert it to constant, finalized data at compile time.

        If :exc:`Impossible` is raised, the node is not constant and
        will be evaluated at runtime. Any other exception will also be
        evaluated at runtime for easier debugging.
        """
        const = node.as_const(frame.eval_ctx)

        if frame.eval_ctx.autoescape:
            const = escape(const)

        # Template data doesn't go through finalize.
        if isinstance(node, nodes.TemplateData):
            return text_type(const)

        return finalize.const(const)

    def _output_child_pre(self, node, frame, finalize):
        """Output extra source code before visiting a child of an
        ``Output`` node.
        """
        if frame.eval_ctx.volatile:
            self.write("(escape if context.eval_ctx.autoescape else to_string)(")
        elif frame.eval_ctx.autoescape:
            self.write("escape(")
        else:
            self.write("to_string(")

        if finalize.src is not None:
            self.write(finalize.src)

    def _output_child_post(self, node, frame, finalize):
        """Output extra source code after visiting a child of an
        ``Output`` node.
        """
        self.write(")")

        if finalize.src is not None:
            self.write(")")

    def visit_Output(self, node, frame):
        # If an extends is active, don't render outside a block.
        if frame.require_output_check:
            # A top-level extends is known to exist at compile time.
            if self.has_known_extends:
                return

            self.writeline("if parent_template is None:")
            self.indent()

        finalize = self._make_finalize()
        body = []

        # Evaluate constants at compile time if possible. Each item in
        # body will be either a list of static data or a node to be
        # evaluated at runtime.
        for child in node.nodes:
            try:
                if not (
                    # If the finalize function requires runtime context,
                    # constants can't be evaluated at compile time.
                    finalize.const
                    # Unless it's basic template data that won't be
                    # finalized anyway.
                    or isinstance(child, nodes.TemplateData)
                ):
                    raise nodes.Impossible()

                const = self._output_child_to_const(child, frame, finalize)
            except (nodes.Impossible, Exception):
                # The node was not constant and needs to be evaluated at
                # runtime. Or another error was raised, which is easier
                # to debug at runtime.
                body.append(child)
                continue

            if body and isinstance(body[-1], list):
                body[-1].append(const)
            else:
                body.append([const])

        if frame.buffer is not None:
            if len(body) == 1:
                self.writeline("%s.append(" % frame.buffer)
            else:
                self.writeline("%s.extend((" % frame.buffer)

            self.indent()

        for item in body:
            if isinstance(item, list):
                # A group of constant data to join and output.
                val = self._output_const_repr(item)

                if frame.buffer is None:
                    self.writeline("yield " + val)
                else:
                    self.writeline(val + ",")
            else:
                if frame.buffer is None:
                    self.writeline("yield ", item)
                else:
                    self.newline(item)

                # A node to be evaluated at runtime.
                self._output_child_pre(item, frame, finalize)
                self.visit(item, frame)
                self._output_child_post(item, frame, finalize)

                if frame.buffer is not None:
                    self.write(",")

        if frame.buffer is not None:
            self.outdent()
            self.writeline(")" if len(body) == 1 else "))")

        if frame.require_output_check:
            self.outdent()

    def visit_Assign(self, node, frame):
        self.push_assign_tracking()
        self.newline(node)
        self.visit(node.target, frame)
        self.write(" = ")
        self.visit(node.node, frame)
        self.pop_assign_tracking(frame)

    def visit_AssignBlock(self, node, frame):
        self.push_assign_tracking()
        block_frame = frame.inner()
        # This is a special case.  Since a set block always captures we
        # will disable output checks.  This way one can use set blocks
        # toplevel even in extended templates.
        block_frame.require_output_check = False
        block_frame.symbols.analyze_node(node)
        self.enter_frame(block_frame)
        self.buffer(block_frame)
        self.blockvisit(node.body, block_frame)
        self.newline(node)
        self.visit(node.target, frame)
        self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
        if node.filter is not None:
            self.visit_Filter(node.filter, block_frame)
        else:
            self.write("concat(%s)" % block_frame.buffer)
        self.write(")")
        self.pop_assign_tracking(frame)
        self.leave_frame(block_frame)

    # -- Expression Visitors

    def visit_Name(self, node, frame):
        if node.ctx == "store" and frame.toplevel:
            if self._assign_stack:
                self._assign_stack[-1].add(node.name)

        ref = frame.symbols.ref(node.name)

        # If we are looking up a variable we might have to deal with the
        # case where it's undefined.  We can skip that case if the load
        # instruction indicates a parameter which are always defined.
if node.ctx == "load": load = frame.symbols.find_load(ref) if not ( load is not None and load[0] == VAR_LOAD_PARAMETER and not self.parameter_is_undeclared(ref) ): self.write( "(undefined(name=%r) if %s is missing else %s)" % (node.name, ref, ref) ) return self.write(ref) def visit_NSRef(self, node, frame): # NSRefs can only be used to store values; since they use the normal # `foo.bar` notation they will be parsed as a normal attribute access # when used anywhere but in a `set` context ref = frame.symbols.ref(node.name) self.writeline("if not isinstance(%s, Namespace):" % ref) self.indent() self.writeline( "raise TemplateRuntimeError(%r)" % "cannot assign attribute on non-namespace object" ) self.outdent() self.writeline("%s[%r]" % (ref, node.attr)) def visit_Const(self, node, frame): val = node.as_const(frame.eval_ctx) if isinstance(val, float): self.write(str(val)) else: self.write(repr(val)) def visit_TemplateData(self, node, frame): try: self.write(repr(node.as_const(frame.eval_ctx))) except nodes.Impossible: self.write( "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data ) def visit_Tuple(self, node, frame): self.write("(") idx = -1 for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item, frame) self.write(idx == 0 and ",)" or ")") def visit_List(self, node, frame): self.write("[") for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item, frame) self.write("]") def visit_Dict(self, node, frame): self.write("{") for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item.key, frame) self.write(": ") self.visit(item.value, frame) self.write("}") def binop(operator, interceptable=True): # noqa: B902 @optimizeconst def visitor(self, node, frame): if ( self.environment.sandboxed and operator in self.environment.intercepted_binops ): self.write("environment.call_binop(context, %r, " % operator) self.visit(node.left, frame) self.write(", ") self.visit(node.right, frame) else: self.write("(") self.visit(node.left, frame) self.write(" %s " % operator) self.visit(node.right, frame) self.write(")") return visitor def uaop(operator, interceptable=True): # noqa: B902 @optimizeconst def visitor(self, node, frame): if ( self.environment.sandboxed and operator in self.environment.intercepted_unops ): self.write("environment.call_unop(context, %r, " % operator) self.visit(node.node, frame) else: self.write("(" + operator) self.visit(node.node, frame) self.write(")") return visitor visit_Add = binop("+") visit_Sub = binop("-") visit_Mul = binop("*") visit_Div = binop("/") visit_FloorDiv = binop("//") visit_Pow = binop("**") visit_Mod = binop("%") visit_And = binop("and", interceptable=False) visit_Or = binop("or", interceptable=False) visit_Pos = uaop("+") visit_Neg = uaop("-") visit_Not = uaop("not ", interceptable=False) del binop, uaop @optimizeconst def visit_Concat(self, node, frame): if frame.eval_ctx.volatile: func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)" elif frame.eval_ctx.autoescape: func_name = "markup_join" else: func_name = "unicode_join" self.write("%s((" % func_name) for arg in node.nodes: self.visit(arg, frame) self.write(", ") self.write("))") @optimizeconst def visit_Compare(self, node, frame): self.write("(") self.visit(node.expr, frame) for op in node.ops: self.visit(op, frame) self.write(")") def visit_Operand(self, node, frame): self.write(" %s " % operators[node.op]) self.visit(node.expr, frame) @optimizeconst def visit_Getattr(self, node, frame): if 
self.environment.is_async: self.write("(await auto_await(") self.write("environment.getattr(") self.visit(node.node, frame) self.write(", %r)" % node.attr) if self.environment.is_async: self.write("))") @optimizeconst def visit_Getitem(self, node, frame): # slices bypass the environment getitem method. if isinstance(node.arg, nodes.Slice): self.visit(node.node, frame) self.write("[") self.visit(node.arg, frame) self.write("]") else: if self.environment.is_async: self.write("(await auto_await(") self.write("environment.getitem(") self.visit(node.node, frame) self.write(", ") self.visit(node.arg, frame) self.write(")") if self.environment.is_async: self.write("))") def visit_Slice(self, node, frame): if node.start is not None: self.visit(node.start, frame) self.write(":") if node.stop is not None: self.visit(node.stop, frame) if node.step is not None: self.write(":") self.visit(node.step, frame) @optimizeconst def visit_Filter(self, node, frame): if self.environment.is_async: self.write("await auto_await(") self.write(self.filters[node.name] + "(") func = self.environment.filters.get(node.name) if func is None: self.fail("no filter named %r" % node.name, node.lineno) if getattr(func, "contextfilter", False) is True: self.write("context, ") elif getattr(func, "evalcontextfilter", False) is True: self.write("context.eval_ctx, ") elif getattr(func, "environmentfilter", False) is True: self.write("environment, ") # if the filter node is None we are inside a filter block # and want to write to the current buffer if node.node is not None: self.visit(node.node, frame) elif frame.eval_ctx.volatile: self.write( "(context.eval_ctx.autoescape and" " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer) ) elif frame.eval_ctx.autoescape: self.write("Markup(concat(%s))" % frame.buffer) else: self.write("concat(%s)" % frame.buffer) self.signature(node, frame) self.write(")") if self.environment.is_async: self.write(")") @optimizeconst def visit_Test(self, node, frame): self.write(self.tests[node.name] + "(") if node.name not in self.environment.tests: self.fail("no test named %r" % node.name, node.lineno) self.visit(node.node, frame) self.signature(node, frame) self.write(")") @optimizeconst def visit_CondExpr(self, node, frame): def write_expr2(): if node.expr2 is not None: return self.visit(node.expr2, frame) self.write( "cond_expr_undefined(%r)" % ( "the inline if-" "expression on %s evaluated to false and " "no else section was defined." 
% self.position(node) ) ) self.write("(") self.visit(node.expr1, frame) self.write(" if ") self.visit(node.test, frame) self.write(" else ") write_expr2() self.write(")") @optimizeconst def visit_Call(self, node, frame, forward_caller=False): if self.environment.is_async: self.write("await auto_await(") if self.environment.sandboxed: self.write("environment.call(context, ") else: self.write("context.call(") self.visit(node.node, frame) extra_kwargs = forward_caller and {"caller": "caller"} or None self.signature(node, frame, extra_kwargs) self.write(")") if self.environment.is_async: self.write(")") def visit_Keyword(self, node, frame): self.write(node.key + "=") self.visit(node.value, frame) # -- Unused nodes for extensions def visit_MarkSafe(self, node, frame): self.write("Markup(") self.visit(node.expr, frame) self.write(")") def visit_MarkSafeIfAutoescape(self, node, frame): self.write("(context.eval_ctx.autoescape and Markup or identity)(") self.visit(node.expr, frame) self.write(")") def visit_EnvironmentAttribute(self, node, frame): self.write("environment." + node.name) def visit_ExtensionAttribute(self, node, frame): self.write("environment.extensions[%r].%s" % (node.identifier, node.name)) def visit_ImportedName(self, node, frame): self.write(self.import_aliases[node.importname]) def visit_InternalName(self, node, frame): self.write(node.name) def visit_ContextReference(self, node, frame): self.write("context") def visit_DerivedContextReference(self, node, frame): self.write(self.derive_context(frame)) def visit_Continue(self, node, frame): self.writeline("continue", node) def visit_Break(self, node, frame): self.writeline("break", node) def visit_Scope(self, node, frame): scope_frame = frame.inner() scope_frame.symbols.analyze_node(node) self.enter_frame(scope_frame) self.blockvisit(node.body, scope_frame) self.leave_frame(scope_frame) def visit_OverlayScope(self, node, frame): ctx = self.temporary_identifier() self.writeline("%s = %s" % (ctx, self.derive_context(frame))) self.writeline("%s.vars = " % ctx) self.visit(node.context, frame) self.push_context_reference(ctx) scope_frame = frame.inner(isolated=True) scope_frame.symbols.analyze_node(node) self.enter_frame(scope_frame) self.blockvisit(node.body, scope_frame) self.leave_frame(scope_frame) self.pop_context_reference() def visit_EvalContextModifier(self, node, frame): for keyword in node.options: self.writeline("context.eval_ctx.%s = " % keyword.key) self.visit(keyword.value, frame) try: val = keyword.value.as_const(frame.eval_ctx) except nodes.Impossible: frame.eval_ctx.volatile = True else: setattr(frame.eval_ctx, keyword.key, val) def visit_ScopedEvalContextModifier(self, node, frame): old_ctx_name = self.temporary_identifier() saved_ctx = frame.eval_ctx.save() self.writeline("%s = context.eval_ctx.save()" % old_ctx_name) self.visit_EvalContextModifier(node, frame) for child in node.body: self.visit(child, frame) frame.eval_ctx.revert(saved_ctx) self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
0
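A small sketch (not part of the dumped file) of what compiler.py produces: Environment.compile with raw=True returns the generated Python source instead of a code object, which makes it easy to see the root() render function and block functions this module writes. The template text is illustrative; it assumes Jinja2 2.11.

from jinja2 import Environment

env = Environment()
# raw=True is a real keyword of Environment.compile; it returns source text
source = env.compile("{% for item in seq %}{{ item }}{% endfor %}", raw=True)
print(source)  # shows the generated root(context, missing=missing, ...) generator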
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/constants.py
# -*- coding: utf-8 -*-
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u"""\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
0
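A brief sketch (not part of the dumped file): LOREM_IPSUM_WORDS feeds jinja2.utils.generate_lorem_ipsum, which also backs the lipsum() template global. The keyword arguments shown are the real parameters of that helper; the template string is illustrative.

from jinja2 import Environment
from jinja2.utils import generate_lorem_ipsum

# one short plain-text paragraph built from LOREM_IPSUM_WORDS
print(generate_lorem_ipsum(n=1, html=False, min=5, max=10))

# the same helper exposed as the lipsum() global inside templates
env = Environment()
print(env.from_string("{{ lipsum(2) }}").render())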
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/loaders.py
# -*- coding: utf-8 -*-
"""API and implementations for loading templates from different data
sources.
"""
import os
import sys
import weakref
from hashlib import sha1
from os import path
from types import ModuleType

from ._compat import abc
from ._compat import fspath
from ._compat import iteritems
from ._compat import string_types
from .exceptions import TemplateNotFound
from .utils import internalcode
from .utils import open_if_exists


def split_template_path(template):
    """Split a path into segments and perform a sanity check.  If it detects
    '..' in the path it will raise a `TemplateNotFound` error.
    """
    pieces = []
    for piece in template.split("/"):
        if (
            path.sep in piece
            or (path.altsep and path.altsep in piece)
            or piece == path.pardir
        ):
            raise TemplateNotFound(template)
        elif piece and piece != ".":
            pieces.append(piece)
    return pieces


class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as a unicode string or an ASCII bytestring.  The filename
        should be the name of the file on the filesystem if it was loaded
        from there, otherwise `None`.  The filename is used by python for
        the tracebacks if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError(
                "%s cannot provide access to the source" % self.__class__.__name__
            )
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError("this loader cannot iterate over all templates")

    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
""" code = None if globals is None: globals = {} # first we try to get the source for this template together # with the filename and the uptodate function. source, filename, uptodate = self.get_source(environment, name) # try to load the code from the bytecode cache if there is a # bytecode cache configured. bcc = environment.bytecode_cache if bcc is not None: bucket = bcc.get_bucket(environment, name, filename, source) code = bucket.code # if we don't have code so far (not cached, no longer up to # date) etc. we compile the template if code is None: code = environment.compile(source, name, filename) # if the bytecode cache is available and the bucket doesn't # have a code so far, we give the bucket the new code and put # it back to the bytecode cache. if bcc is not None and bucket.code is None: bucket.code = code bcc.set_bucket(bucket) return environment.template_class.from_code( environment, code, globals, uptodate ) class FileSystemLoader(BaseLoader): """Loads templates from the file system. This loader can find templates in folders on the file system and is the preferred way to load them. The loader takes the path to the templates as string, or if multiple locations are wanted a list of them which is then looked up in the given order:: >>> loader = FileSystemLoader('/path/to/templates') >>> loader = FileSystemLoader(['/path/to/templates', '/other/path']) Per default the template encoding is ``'utf-8'`` which can be changed by setting the `encoding` parameter to something else. To follow symbolic links, set the *followlinks* parameter to ``True``:: >>> loader = FileSystemLoader('/path/to/templates', followlinks=True) .. versionchanged:: 2.8 The ``followlinks`` parameter was added. """ def __init__(self, searchpath, encoding="utf-8", followlinks=False): if not isinstance(searchpath, abc.Iterable) or isinstance( searchpath, string_types ): searchpath = [searchpath] # In Python 3.5, os.path.join doesn't support Path. This can be # simplified to list(searchpath) when Python 3.5 is dropped. self.searchpath = [fspath(p) for p in searchpath] self.encoding = encoding self.followlinks = followlinks def get_source(self, environment, template): pieces = split_template_path(template) for searchpath in self.searchpath: filename = path.join(searchpath, *pieces) f = open_if_exists(filename) if f is None: continue try: contents = f.read().decode(self.encoding) finally: f.close() mtime = path.getmtime(filename) def uptodate(): try: return path.getmtime(filename) == mtime except OSError: return False return contents, filename, uptodate raise TemplateNotFound(template) def list_templates(self): found = set() for searchpath in self.searchpath: walk_dir = os.walk(searchpath, followlinks=self.followlinks) for dirpath, _, filenames in walk_dir: for filename in filenames: template = ( os.path.join(dirpath, filename)[len(searchpath) :] .strip(os.path.sep) .replace(os.path.sep, "/") ) if template[:2] == "./": template = template[2:] if template not in found: found.add(template) return sorted(found) class PackageLoader(BaseLoader): """Load templates from python eggs or packages. It is constructed with the name of the python package and the path to the templates in that package:: loader = PackageLoader('mypackage', 'views') If the package path is not given, ``'templates'`` is assumed. Per default the template encoding is ``'utf-8'`` which can be changed by setting the `encoding` parameter to something else. 
    Due to the nature of eggs it's only possible to reload templates if
    the package was loaded from the file system and not a zip file.
    """

    def __init__(self, package_name, package_path="templates", encoding="utf-8"):
        from pkg_resources import DefaultProvider
        from pkg_resources import get_provider
        from pkg_resources import ResourceManager

        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        p = "/".join((self.package_path,) + tuple(pieces))

        if not self.provider.has_resource(p):
            raise TemplateNotFound(template)

        filename = uptodate = None

        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(self.manager, p)
            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, p)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        path = self.package_path

        if path[:2] == "./":
            path = path[2:]
        elif path == ".":
            path = ""

        offset = len(path)
        results = []

        def _walk(path):
            for filename in self.provider.resource_listdir(path):
                fullname = path + "/" + filename

                if self.provider.resource_isdir(fullname):
                    _walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip("/"))

        _walk(path)
        results.sort()
        return results


class DictLoader(BaseLoader):
    """Loads a template from a python dict.  It's passed a dict of unicode
    strings bound to template names.  This loader is useful for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Because auto reloading is rarely useful this is disabled per default.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        if template in self.mapping:
            source = self.mapping[template]
            return source, None, lambda: source == self.mapping.get(template)
        raise TemplateNotFound(template)

    def list_templates(self):
        return sorted(self.mapping)


class FunctionLoader(BaseLoader):
    """A loader that is passed a function which does the loading.  The
    function receives the name of the template and has to return either
    a unicode string with the template source, a tuple in the form
    ``(source, filename, uptodatefunc)`` or `None` if the template does
    not exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    The `uptodatefunc` is a function that is called if autoreload is enabled
    and has to return `True` if the template is still up to date.  For more
    details have a look at :meth:`BaseLoader.get_source` which has the same
    return value.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        rv = self.load_func(template)

        if rv is None:
            raise TemplateNotFound(template)
        elif isinstance(rv, string_types):
            return rv, None, None

        return rv


class PrefixLoader(BaseLoader):
    """A loader that is passed a dict of loaders where each loader is bound
    to a prefix.  The prefix is delimited from the template by a slash per
    default, which can be changed by setting the `delimiter` argument to
    something else::

        loader = PrefixLoader({
            'app1': PackageLoader('mypackage.app1'),
            'app2': PackageLoader('mypackage.app2')
        })

    By loading ``'app1/index.html'`` the file from the app1 package is loaded,
    by loading ``'app2/index.html'`` the file from the second.
""" def __init__(self, mapping, delimiter="/"): self.mapping = mapping self.delimiter = delimiter def get_loader(self, template): try: prefix, name = template.split(self.delimiter, 1) loader = self.mapping[prefix] except (ValueError, KeyError): raise TemplateNotFound(template) return loader, name def get_source(self, environment, template): loader, name = self.get_loader(template) try: return loader.get_source(environment, name) except TemplateNotFound: # re-raise the exception with the correct filename here. # (the one that includes the prefix) raise TemplateNotFound(template) @internalcode def load(self, environment, name, globals=None): loader, local_name = self.get_loader(name) try: return loader.load(environment, local_name, globals) except TemplateNotFound: # re-raise the exception with the correct filename here. # (the one that includes the prefix) raise TemplateNotFound(name) def list_templates(self): result = [] for prefix, loader in iteritems(self.mapping): for template in loader.list_templates(): result.append(prefix + self.delimiter + template) return result class ChoiceLoader(BaseLoader): """This loader works like the `PrefixLoader` just that no prefix is specified. If a template could not be found by one loader the next one is tried. >>> loader = ChoiceLoader([ ... FileSystemLoader('/path/to/user/templates'), ... FileSystemLoader('/path/to/system/templates') ... ]) This is useful if you want to allow users to override builtin templates from a different location. """ def __init__(self, loaders): self.loaders = loaders def get_source(self, environment, template): for loader in self.loaders: try: return loader.get_source(environment, template) except TemplateNotFound: pass raise TemplateNotFound(template) @internalcode def load(self, environment, name, globals=None): for loader in self.loaders: try: return loader.load(environment, name, globals) except TemplateNotFound: pass raise TemplateNotFound(name) def list_templates(self): found = set() for loader in self.loaders: found.update(loader.list_templates()) return sorted(found) class _TemplateModule(ModuleType): """Like a normal module but with support for weak references""" class ModuleLoader(BaseLoader): """This loader loads templates from precompiled templates. Example usage: >>> loader = ChoiceLoader([ ... ModuleLoader('/path/to/compiled/templates'), ... FileSystemLoader('/path/to/templates') ... ]) Templates can be precompiled with :meth:`Environment.compile_templates`. """ has_source_access = False def __init__(self, path): package_name = "_jinja2_module_templates_%x" % id(self) # create a fake module that looks for the templates in the # path given. mod = _TemplateModule(package_name) if not isinstance(path, abc.Iterable) or isinstance(path, string_types): path = [path] mod.__path__ = [fspath(p) for p in path] sys.modules[package_name] = weakref.proxy( mod, lambda x: sys.modules.pop(package_name, None) ) # the only strong reference, the sys.modules entry is weak # so that the garbage collector can remove it once the # loader that created it goes out of business. 
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()

    @staticmethod
    def get_module_filename(name):
        return ModuleLoader.get_template_key(name) + ".py"

    @internalcode
    def load(self, environment, name, globals=None):
        key = self.get_template_key(name)
        module = "%s.%s" % (self.package_name, key)
        mod = getattr(self.module, module, None)

        if mod is None:
            try:
                mod = __import__(module, None, None, ["root"])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals
        )
0
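The loaders above are normally combined with an Environment rather than used directly. A minimal sketch of that, assuming a local "templates" directory and an "index.html" file exist (those names and the template content are illustrative; the classes and methods are the jinja2 2.11 API shown above):

from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader

# DictLoader entries act as fallbacks when the file system copy is missing.
loader = ChoiceLoader(
    [
        FileSystemLoader("templates"),  # assumed directory with index.html
        DictLoader({"index.html": "Hello {{ name }}!"}),
    ]
)
env = Environment(loader=loader, auto_reload=True)

# get_template() goes through BaseLoader.load(), which consults the
# bytecode cache and the uptodate() callable returned by get_source().
template = env.get_template("index.html")
print(template.render(name="World"))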
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/__init__.py
# -*- coding: utf-8 -*- """Jinja is a template engine written in pure Python. It provides a non-XML syntax that supports inline expressions and an optional sandboxed environment. """ from markupsafe import escape from markupsafe import Markup from .bccache import BytecodeCache from .bccache import FileSystemBytecodeCache from .bccache import MemcachedBytecodeCache from .environment import Environment from .environment import Template from .exceptions import TemplateAssertionError from .exceptions import TemplateError from .exceptions import TemplateNotFound from .exceptions import TemplateRuntimeError from .exceptions import TemplatesNotFound from .exceptions import TemplateSyntaxError from .exceptions import UndefinedError from .filters import contextfilter from .filters import environmentfilter from .filters import evalcontextfilter from .loaders import BaseLoader from .loaders import ChoiceLoader from .loaders import DictLoader from .loaders import FileSystemLoader from .loaders import FunctionLoader from .loaders import ModuleLoader from .loaders import PackageLoader from .loaders import PrefixLoader from .runtime import ChainableUndefined from .runtime import DebugUndefined from .runtime import make_logging_undefined from .runtime import StrictUndefined from .runtime import Undefined from .utils import clear_caches from .utils import contextfunction from .utils import environmentfunction from .utils import evalcontextfunction from .utils import is_undefined from .utils import select_autoescape __version__ = "2.11.2"
0
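Everything re-exported here is reachable directly from the jinja2 package. A minimal sketch of the most common entry points; the template strings are illustrative:

from jinja2 import DictLoader, Environment, Template, select_autoescape

# One-off template without an environment.
print(Template("Hello {{ name }}!").render(name="World"))

# Environment with autoescaping decided per template name.
env = Environment(
    loader=DictLoader({"page.html": "<p>{{ body }}</p>"}),
    autoescape=select_autoescape(["html", "xml"]),
)
print(env.get_template("page.html").render(body="<script>"))  # body is escaped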
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/idtracking.py
from ._compat import iteritems
from .visitor import NodeVisitor

VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"


def find_symbols(nodes, parent_symbols=None):
    sym = Symbols(parent=parent_symbols)
    visitor = FrameSymbolVisitor(sym)
    for node in nodes:
        visitor.visit(node)
    return sym


def symbols_for_node(node, parent_symbols=None):
    sym = Symbols(parent=parent_symbols)
    sym.analyze_node(node)
    return sym


class Symbols(object):
    def __init__(self, parent=None, level=None):
        if level is None:
            if parent is None:
                level = 0
            else:
                level = parent.level + 1
        self.level = level
        self.parent = parent
        self.refs = {}
        self.loads = {}
        self.stores = set()

    def analyze_node(self, node, **kwargs):
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(self, name, load=None):
        ident = "l_%d_%s" % (self.level, name)
        self.refs[name] = ident
        if load is not None:
            self.loads[ident] = load
        return ident

    def find_load(self, target):
        if target in self.loads:
            return self.loads[target]
        if self.parent is not None:
            return self.parent.find_load(target)

    def find_ref(self, name):
        if name in self.refs:
            return self.refs[name]
        if self.parent is not None:
            return self.parent.find_ref(name)

    def ref(self, name):
        rv = self.find_ref(name)
        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that "
                "was unknown to the frame (%r)" % name
            )
        return rv

    def copy(self):
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name):
        self.stores.add(name)

        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there.  If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)
                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return

            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name):
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name):
        target = self.find_ref(name)
        if target is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols):
        stores = {}
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue
                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in iteritems(stores):
            if branch_count == len(branch_symbols):
                continue

            target = self.find_ref(name)
            assert target is not None, "should not happen"

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)
                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue
            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self):
        rv = {}
        node = self
        while node is not None:
            for name in node.stores:
                if name not in rv:
                    rv[name] = self.find_ref(name)
            node = node.parent
        return rv

    def dump_param_targets(self):
        rv = set()
        node = self
        while node is not None:
            for target, (instr, _) in iteritems(self.loads):
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)
            node = node.parent
        return rv


class RootVisitor(NodeVisitor):
    def __init__(self, symbols):
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node, **kwargs):
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    visit_Template = visit_Block = visit_Macro = visit_FilterBlock = _simple_visit
    visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node, **kwargs):
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_CallBlock(self, node, **kwargs):
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)

    def visit_OverlayScope(self, node, **kwargs):
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_For(self, node, for_branch="body", **kwargs):
        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        elif for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return
        else:
            raise RuntimeError("Unknown for branch")

        for item in branch or ():
            self.sym_visitor.visit(item)

    def visit_With(self, node, **kwargs):
        for target in node.targets:
            self.sym_visitor.visit(target)
        for child in node.body:
            self.sym_visitor.visit(child)

    def generic_visit(self, node, *args, **kwargs):
        raise NotImplementedError(
            "Cannot find symbols for %r" % node.__class__.__name__
        )


class FrameSymbolVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`."""

    def __init__(self, symbols):
        self.symbols = symbols

    def visit_Name(self, node, store_as_param=False, **kwargs):
        """All assignments to names go through this function."""
        if store_as_param or node.ctx == "param":
            self.symbols.declare_parameter(node.name)
        elif node.ctx == "store":
            self.symbols.store(node.name)
        elif node.ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node, **kwargs):
        self.symbols.load(node.name)

    def visit_If(self, node, **kwargs):
        self.visit(node.test, **kwargs)
        original_symbols = self.symbols

        def inner_visit(nodes):
            self.symbols = rv = original_symbols.copy()

            for subnode in nodes:
                self.visit(subnode, **kwargs)

            self.symbols = original_symbols
            return rv
        body_symbols = inner_visit(node.body)
        elif_symbols = inner_visit(node.elif_)
        else_symbols = inner_visit(node.else_ or ())
        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])

    def visit_Macro(self, node, **kwargs):
        self.symbols.store(node.name)

    def visit_Import(self, node, **kwargs):
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node, **kwargs):
        self.generic_visit(node, **kwargs)

        for name in node.names:
            if isinstance(name, tuple):
                self.symbols.store(name[1])
            else:
                self.symbols.store(name)

    def visit_Assign(self, node, **kwargs):
        """Visit assignments in the correct order."""
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node, **kwargs):
        """Visiting stops at for blocks.  However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node, **kwargs):
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node, **kwargs):
        self.visit(node.filter, **kwargs)

    def visit_With(self, node, **kwargs):
        for target in node.values:
            self.visit(target)

    def visit_AssignBlock(self, node, **kwargs):
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node, **kwargs):
        """Stop visiting at scopes."""

    def visit_Block(self, node, **kwargs):
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node, **kwargs):
        """Do not visit into overlay scopes."""
0
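This module is internal compiler machinery, but it can be poked at directly for debugging. A sketch under the assumption of a default Environment; `symbols_for_node` accepts the Template node returned by `Environment.parse`, and the template string is made up for illustration:

from jinja2 import Environment
from jinja2.idtracking import symbols_for_node

env = Environment()
tree = env.parse("{% set greeting = 'hi' %}{{ greeting }} {{ user }}")

sym = symbols_for_node(tree)
# 'greeting' is stored at the top level; 'user' has no store, so it is
# recorded as a ("resolve", "user") load to be looked up in the context.
print(sym.stores)        # {'greeting'}
print(sym.dump_stores())  # {'greeting': 'l_0_greeting'}
print(sym.loads)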
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/runtime.py
# -*- coding: utf-8 -*- """The runtime functions and state used by compiled templates.""" import sys from itertools import chain from types import MethodType from markupsafe import escape # noqa: F401 from markupsafe import Markup from markupsafe import soft_unicode from ._compat import abc from ._compat import imap from ._compat import implements_iterator from ._compat import implements_to_string from ._compat import iteritems from ._compat import PY2 from ._compat import string_types from ._compat import text_type from ._compat import with_metaclass from .exceptions import TemplateNotFound # noqa: F401 from .exceptions import TemplateRuntimeError # noqa: F401 from .exceptions import UndefinedError from .nodes import EvalContext from .utils import concat from .utils import evalcontextfunction from .utils import internalcode from .utils import missing from .utils import Namespace # noqa: F401 from .utils import object_type_repr # these variables are exported to the template runtime exported = [ "LoopContext", "TemplateReference", "Macro", "Markup", "TemplateRuntimeError", "missing", "concat", "escape", "markup_join", "unicode_join", "to_string", "identity", "TemplateNotFound", "Namespace", "Undefined", ] #: the name of the function that is used to convert something into #: a string. We can just use the text type here. to_string = text_type def identity(x): """Returns its argument. Useful for certain things in the environment. """ return x def markup_join(seq): """Concatenation that escapes if necessary and converts to unicode.""" buf = [] iterator = imap(soft_unicode, seq) for arg in iterator: buf.append(arg) if hasattr(arg, "__html__"): return Markup(u"").join(chain(buf, iterator)) return concat(buf) def unicode_join(seq): """Simple args to unicode conversion and concatenation.""" return concat(imap(text_type, seq)) def new_context( environment, template_name, blocks, vars=None, shared=None, globals=None, locals=None, ): """Internal helper for context creation.""" if vars is None: vars = {} if shared: parent = vars else: parent = dict(globals or (), **vars) if locals: # if the parent is shared a copy should be created because # we don't want to modify the dict passed if shared: parent = dict(parent) for key, value in iteritems(locals): if value is not missing: parent[key] = value return environment.context_class(environment, parent, template_name, blocks) class TemplateReference(object): """The `self` in templates.""" def __init__(self, context): self.__context = context def __getitem__(self, name): blocks = self.__context.blocks[name] return BlockReference(name, self.__context, blocks, 0) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.__context.name) def _get_func(x): return getattr(x, "__func__", x) class ContextMeta(type): def __new__(mcs, name, bases, d): rv = type.__new__(mcs, name, bases, d) if bases == (): return rv resolve = _get_func(rv.resolve) default_resolve = _get_func(Context.resolve) resolve_or_missing = _get_func(rv.resolve_or_missing) default_resolve_or_missing = _get_func(Context.resolve_or_missing) # If we have a changed resolve but no changed default or missing # resolve we invert the call logic. 
        if (
            resolve is not default_resolve
            and resolve_or_missing is default_resolve_or_missing
        ):
            rv._legacy_resolve_mode = True
        elif (
            resolve is default_resolve
            and resolve_or_missing is default_resolve_or_missing
        ):
            rv._fast_resolve_mode = True

        return rv


def resolve_or_missing(context, key, missing=missing):
    if key in context.vars:
        return context.vars[key]
    if key in context.parent:
        return context.parent[key]
    return missing


class Context(with_metaclass(ContextMeta)):
    """The template context holds the variables of a template.  It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should
    not be created by hand.

    The context is immutable.  Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only.  Template filters and global functions marked as
    :func:`contextfunction`\\s get the active context passed as first
    argument and are allowed to access the context read-only.

    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`).  Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """

    # XXX: we want to eventually make this be a deprecation warning and
    # remove it.
    _legacy_resolve_mode = False
    _fast_resolve_mode = False

    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks.  Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

        # In case we detect the fast resolve mode we can set up an alias
        # here that bypasses the legacy code logic.
        if self._fast_resolve_mode:
            self.resolve_or_missing = MethodType(resolve_or_missing, self)

    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            blocks[index]
        except LookupError:
            return self.environment.undefined(
                "there is no parent block called %r." % name, name="super"
            )
        return BlockReference(name, self, blocks, index)

    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the variable looked up.
        """
        if self._legacy_resolve_mode:
            rv = resolve_or_missing(self, key)
        else:
            rv = self.resolve_or_missing(key)

        if rv is missing:
            return self.environment.undefined(name=key)

        return rv

    def resolve_or_missing(self, key):
        """Resolves a variable like :meth:`resolve` but returns the
        special `missing` value if it cannot be found.
        """
        if self._legacy_resolve_mode:
            rv = self.resolve(key)

            if isinstance(rv, Undefined):
                rv = missing

            return rv

        return resolve_or_missing(self, key)

    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)

    def get_all(self):
        """Return the complete context as dict including the exported
        variables.
        For optimization reasons this might not return an actual copy
        so be careful with using it.
        """
        if not self.vars:
            return self.parent
        if not self.parent:
            return self.vars
        return dict(self.parent, **self.vars)

    @internalcode
    def call(__self, __obj, *args, **kwargs):  # noqa: B902
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.
        """
        if __debug__:
            __traceback_hide__ = True  # noqa

        # Allow callable classes to take a context
        if hasattr(__obj, "__call__"):  # noqa: B004
            fn = __obj.__call__

            for fn_type in (
                "contextfunction",
                "evalcontextfunction",
                "environmentfunction",
            ):
                if hasattr(fn, fn_type):
                    __obj = fn
                    break

        if callable(__obj):
            if getattr(__obj, "contextfunction", False) is True:
                args = (__self,) + args
            elif getattr(__obj, "evalcontextfunction", False) is True:
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, "environmentfunction", False) is True:
                args = (__self.environment,) + args

        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            return __self.environment.undefined(
                "value was undefined because "
                "a callable raised a "
                "StopIteration exception"
            )

    def derived(self, locals=None):
        """Internal helper function to create a derived context.  This is
        used in situations where the system needs a new context in the same
        template that is independent.
        """
        context = new_context(
            self.environment, self.name, {}, self.get_all(), True, None, locals
        )
        context.eval_ctx = self.eval_ctx
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context

    def _all(meth):  # noqa: B902
        def proxy(self):
            return getattr(self.get_all(), meth)()

        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy

    keys = _all("keys")
    values = _all("values")
    items = _all("items")

    # not available on python 3
    if PY2:
        iterkeys = _all("iterkeys")
        itervalues = _all("itervalues")
        iteritems = _all("iteritems")

    del _all

    def __contains__(self, name):
        return name in self.vars or name in self.parent

    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve_or_missing(key)
        if item is missing:
            raise KeyError(key)
        return item

    def __repr__(self):
        return "<%s %s of %r>" % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name,
        )


abc.Mapping.register(Context)


class BlockReference(object):
    """One block on a template reference."""

    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    @property
    def super(self):
        """Super the block."""
        if self._depth + 1 >= len(self._stack):
            return self._context.environment.undefined(
                "there is no parent block called %r." % self.name, name="super"
            )
        return BlockReference(self.name, self._context, self._stack, self._depth + 1)

    @internalcode
    def __call__(self):
        rv = concat(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            rv = Markup(rv)
        return rv


@implements_iterator
class LoopContext:
    """A wrapper iterable for dynamic ``for`` loops, with information
    about the loop and iteration.
    """

    #: Current iteration of the loop, starting at 0.
    index0 = -1

    _length = None
    _after = missing
    _current = missing
    _before = missing
    _last_changed_value = missing

    def __init__(self, iterable, undefined, recurse=None, depth0=0):
        """
        :param iterable: Iterable to wrap.
        :param undefined: :class:`Undefined` class to use for next and
            previous items.
        :param recurse: The function to render the loop body when the
            loop is marked recursive.
        :param depth0: Incremented when looping recursively.
        """
        self._iterable = iterable
        self._iterator = self._to_iterator(iterable)
        self._undefined = undefined
        self._recurse = recurse
        #: How many levels deep a recursive loop currently is, starting at 0.
        self.depth0 = depth0

    @staticmethod
    def _to_iterator(iterable):
        return iter(iterable)

    @property
    def length(self):
        """Length of the iterable.

        If the iterable is a generator or otherwise does not have a
        size, it is eagerly evaluated to get a size.
        """
        if self._length is not None:
            return self._length

        try:
            self._length = len(self._iterable)
        except TypeError:
            iterable = list(self._iterator)
            self._iterator = self._to_iterator(iterable)
            self._length = len(iterable) + self.index + (self._after is not missing)

        return self._length

    def __len__(self):
        return self.length

    @property
    def depth(self):
        """How many levels deep a recursive loop currently is, starting at 1."""
        return self.depth0 + 1

    @property
    def index(self):
        """Current iteration of the loop, starting at 1."""
        return self.index0 + 1

    @property
    def revindex0(self):
        """Number of iterations from the end of the loop, ending at 0.

        Requires calculating :attr:`length`.
        """
        return self.length - self.index

    @property
    def revindex(self):
        """Number of iterations from the end of the loop, ending at 1.

        Requires calculating :attr:`length`.
        """
        return self.length - self.index0

    @property
    def first(self):
        """Whether this is the first iteration of the loop."""
        return self.index0 == 0

    def _peek_next(self):
        """Return the next element in the iterable, or :data:`missing`
        if the iterable is exhausted.  Only peeks one item ahead, caching
        the result in :attr:`_after` for use in subsequent checks.  The
        cache is reset when :meth:`__next__` is called.
        """
        if self._after is not missing:
            return self._after

        self._after = next(self._iterator, missing)
        return self._after

    @property
    def last(self):
        """Whether this is the last iteration of the loop.

        Causes the iterable to advance early.  See
        :func:`itertools.groupby` for issues this can cause.
        The :func:`groupby` filter avoids that issue.
        """
        return self._peek_next() is missing

    @property
    def previtem(self):
        """The item in the previous iteration.  Undefined during the
        first iteration.
        """
        if self.first:
            return self._undefined("there is no previous item")

        return self._before

    @property
    def nextitem(self):
        """The item in the next iteration.  Undefined during the last
        iteration.

        Causes the iterable to advance early.  See
        :func:`itertools.groupby` for issues this can cause.
        The :func:`groupby` filter avoids that issue.
        """
        rv = self._peek_next()

        if rv is missing:
            return self._undefined("there is no next item")

        return rv

    def cycle(self, *args):
        """Return a value from the given args, cycling through based on
        the current :attr:`index0`.

        :param args: One or more values to cycle through.
        """
        if not args:
            raise TypeError("no items for cycling given")

        return args[self.index0 % len(args)]

    def changed(self, *value):
        """Return ``True`` if previously called with a different value
        (including when called for the first time).

        :param value: One or more values to compare to the last call.
""" if self._last_changed_value != value: self._last_changed_value = value return True return False def __iter__(self): return self def __next__(self): if self._after is not missing: rv = self._after self._after = missing else: rv = next(self._iterator) self.index0 += 1 self._before = self._current self._current = rv return rv, self @internalcode def __call__(self, iterable): """When iterating over nested data, render the body of the loop recursively with the given inner iterable data. The loop must have the ``recursive`` marker for this to work. """ if self._recurse is None: raise TypeError( "The loop must have the 'recursive' marker to be called recursively." ) return self._recurse(iterable, self._recurse, depth=self.depth) def __repr__(self): return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length) class Macro(object): """Wraps a macro function.""" def __init__( self, environment, func, name, arguments, catch_kwargs, catch_varargs, caller, default_autoescape=None, ): self._environment = environment self._func = func self._argument_count = len(arguments) self.name = name self.arguments = arguments self.catch_kwargs = catch_kwargs self.catch_varargs = catch_varargs self.caller = caller self.explicit_caller = "caller" in arguments if default_autoescape is None: default_autoescape = environment.autoescape self._default_autoescape = default_autoescape @internalcode @evalcontextfunction def __call__(self, *args, **kwargs): # This requires a bit of explanation, In the past we used to # decide largely based on compile-time information if a macro is # safe or unsafe. While there was a volatile mode it was largely # unused for deciding on escaping. This turns out to be # problematic for macros because whether a macro is safe depends not # on the escape mode when it was defined, but rather when it was used. # # Because however we export macros from the module system and # there are historic callers that do not pass an eval context (and # will continue to not pass one), we need to perform an instance # check here. # # This is considered safe because an eval context is not a valid # argument to callables otherwise anyway. Worst case here is # that if no eval context is passed we fall back to the compile # time autoescape flag. if args and isinstance(args[0], EvalContext): autoescape = args[0].autoescape args = args[1:] else: autoescape = self._default_autoescape # try to consume the positional arguments arguments = list(args[: self._argument_count]) off = len(arguments) # For information why this is necessary refer to the handling # of caller in the `macro_body` handler in the compiler. found_caller = False # if the number of arguments consumed is not the number of # arguments expected we start filling in keyword arguments # and defaults. if off != self._argument_count: for name in self.arguments[len(arguments) :]: try: value = kwargs.pop(name) except KeyError: value = missing if name == "caller": found_caller = True arguments.append(value) else: found_caller = self.explicit_caller # it's important that the order of these arguments does not change # if not also changed in the compiler's `function_scoping` method. # the order is caller, keyword arguments, positional arguments! 
        if self.caller and not found_caller:
            caller = kwargs.pop("caller", None)

            if caller is None:
                caller = self._environment.undefined("No caller defined", name="caller")

            arguments.append(caller)

        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            if "caller" in kwargs:
                raise TypeError(
                    "macro %r was invoked with two values for "
                    "the special caller argument. This is "
                    "most likely a bug." % self.name
                )
            raise TypeError(
                "macro %r takes no keyword argument %r"
                % (self.name, next(iter(kwargs)))
            )

        if self.catch_varargs:
            arguments.append(args[self._argument_count :])
        elif len(args) > self._argument_count:
            raise TypeError(
                "macro %r takes not more than %d argument(s)"
                % (self.name, len(self.arguments))
            )

        return self._invoke(arguments, autoescape)

    def _invoke(self, arguments, autoescape):
        """This method is being swapped out by the async implementation."""
        rv = self._func(*arguments)

        if autoescape:
            rv = Markup(rv)

        return rv

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            self.name is None and "anonymous" or repr(self.name),
        )


@implements_to_string
class Undefined(object):
    """The default undefined type.  This undefined type can be printed and
    iterated over, but every other access will raise an
    :exc:`UndefinedError`:

    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """

    __slots__ = (
        "_undefined_hint",
        "_undefined_obj",
        "_undefined_name",
        "_undefined_exception",
    )

    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc

    @property
    def _undefined_message(self):
        """Build a message about the undefined value based on how it was
        accessed.
        """
        if self._undefined_hint:
            return self._undefined_hint

        if self._undefined_obj is missing:
            return "%r is undefined" % self._undefined_name

        if not isinstance(self._undefined_name, string_types):
            return "%s has no element %r" % (
                object_type_repr(self._undefined_obj),
                self._undefined_name,
            )

        return "%r has no attribute %r" % (
            object_type_repr(self._undefined_obj),
            self._undefined_name,
        )

    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Raise an :exc:`UndefinedError` when operations are performed
        on the undefined value.
        """
        raise self._undefined_exception(self._undefined_message)

    @internalcode
    def __getattr__(self, name):
        if name[:2] == "__":
            raise AttributeError(name)

        return self._fail_with_undefined_error()

    __add__ = __radd__ = __sub__ = __rsub__ = _fail_with_undefined_error
    __mul__ = __rmul__ = __div__ = __rdiv__ = _fail_with_undefined_error
    __truediv__ = __rtruediv__ = _fail_with_undefined_error
    __floordiv__ = __rfloordiv__ = _fail_with_undefined_error
    __mod__ = __rmod__ = __pow__ = __rpow__ = _fail_with_undefined_error
    __pos__ = __neg__ = _fail_with_undefined_error
    __call__ = __getitem__ = _fail_with_undefined_error
    __lt__ = __le__ = __gt__ = __ge__ = _fail_with_undefined_error
    __int__ = __float__ = __complex__ = _fail_with_undefined_error

    def __eq__(self, other):
        return type(self) is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return id(type(self))

    def __str__(self):
        return u""

    def __len__(self):
        return 0

    def __iter__(self):
        if 0:
            yield None

    def __nonzero__(self):
        return False

    __bool__ = __nonzero__

    def __repr__(self):
        return "Undefined"


def make_logging_undefined(logger=None, base=None):
    """Given a logger object this returns a new undefined class that will
    log certain failures.  It will log iterations and printing.
    If no logger is given a default logger is created.

    Example::

        logger = logging.getLogger(__name__)
        LoggingUndefined = make_logging_undefined(
            logger=logger,
            base=Undefined
        )

    .. versionadded:: 2.8

    :param logger: the logger to use.  If not provided, a default logger
                   is created.
    :param base: the base class to add logging functionality to.  This
                 defaults to :class:`Undefined`.
    """
    if logger is None:
        import logging

        logger = logging.getLogger(__name__)
        logger.addHandler(logging.StreamHandler(sys.stderr))

    if base is None:
        base = Undefined

    def _log_message(undef):
        if undef._undefined_hint is None:
            if undef._undefined_obj is missing:
                hint = "%s is undefined" % undef._undefined_name
            elif not isinstance(undef._undefined_name, string_types):
                hint = "%s has no element %s" % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name,
                )
            else:
                hint = "%s has no attribute %s" % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name,
                )
        else:
            hint = undef._undefined_hint

        logger.warning("Template variable warning: %s", hint)

    class LoggingUndefined(base):
        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                return base._fail_with_undefined_error(self, *args, **kwargs)
            except self._undefined_exception as e:
                logger.error("Template variable error: %s", str(e))
                raise e

        def __str__(self):
            rv = base.__str__(self)
            _log_message(self)
            return rv

        def __iter__(self):
            rv = base.__iter__(self)
            _log_message(self)
            return rv

        if PY2:

            def __nonzero__(self):
                rv = base.__nonzero__(self)
                _log_message(self)
                return rv

            def __unicode__(self):
                rv = base.__unicode__(self)
                _log_message(self)
                return rv

        else:

            def __bool__(self):
                rv = base.__bool__(self)
                _log_message(self)
                return rv

    return LoggingUndefined


# No @implements_to_string decorator here because __str__
# is not overwritten from Undefined in this class.  This would cause
# a recursion error in Python 2.
class ChainableUndefined(Undefined):
    """An undefined that is chainable, where both ``__getattr__`` and
    ``__getitem__`` return itself rather than raising an
    :exc:`UndefinedError`.

    >>> foo = ChainableUndefined(name='foo')
    >>> str(foo.bar['baz'])
    ''
    >>> foo.bar['baz'] + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined

    .. versionadded:: 2.11.0
    """

    __slots__ = ()

    def __html__(self):
        return self.__str__()

    def __getattr__(self, _):
        return self

    __getitem__ = __getattr__


@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that returns the debug info when printed.

    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """

    __slots__ = ()

    def __str__(self):
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                return u"{{ %s }}" % self._undefined_name

            return "{{ no such element: %s[%r] }}" % (
                object_type_repr(self._undefined_obj),
                self._undefined_name,
            )

        return u"{{ undefined value printed: %s }}" % self._undefined_hint


@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons.  In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.

    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
      ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """

    __slots__ = ()
    __iter__ = __str__ = __len__ = Undefined._fail_with_undefined_error
    __nonzero__ = __bool__ = Undefined._fail_with_undefined_error
    __eq__ = __ne__ = __hash__ = Undefined._fail_with_undefined_error


# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del (
    Undefined.__slots__,
    ChainableUndefined.__slots__,
    DebugUndefined.__slots__,
    StrictUndefined.__slots__,
)
0
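The undefined types above are selected through the Environment's `undefined` parameter. A small sketch contrasting them; the template string is illustrative and the output comments follow the behavior documented in the docstrings:

from jinja2 import DebugUndefined, Environment, StrictUndefined, Undefined

src = "value: {{ missing_var }}"

print(Environment(undefined=Undefined).from_string(src).render())
# -> "value: "  (the default Undefined prints as an empty string)

print(Environment(undefined=DebugUndefined).from_string(src).render())
# -> "value: {{ missing_var }}"  (debug info stays visible in the output)

try:
    Environment(undefined=StrictUndefined).from_string(src).render()
except Exception as exc:  # jinja2.exceptions.UndefinedError
    print("strict mode raised:", exc)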
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/parser.py
# -*- coding: utf-8 -*- """Parse tokens from the lexer into nodes for the compiler.""" from . import nodes from ._compat import imap from .exceptions import TemplateAssertionError from .exceptions import TemplateSyntaxError from .lexer import describe_token from .lexer import describe_token_expr _statement_keywords = frozenset( [ "for", "if", "block", "extends", "print", "macro", "include", "from", "import", "set", "with", "autoescape", ] ) _compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) _math_nodes = { "add": nodes.Add, "sub": nodes.Sub, "mul": nodes.Mul, "div": nodes.Div, "floordiv": nodes.FloorDiv, "mod": nodes.Mod, } class Parser(object): """This is the central parsing class Jinja uses. It's passed to extensions and can be used to parse expressions or statements. """ def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack = [] self._end_token_stack = [] def fail(self, msg, lineno=None, exc=TemplateSyntaxError): """Convenience method that raises `exc` with the message, passed line number or last line number as well as the current name and filename. """ if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof(self, name, end_token_stack, lineno): expected = [] for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: currently_looking = " or ".join( "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1] ) else: currently_looking = None if name is None: message = ["Unexpected end of template."] else: message = ["Encountered unknown tag '%s'." % name] if currently_looking: if name is not None and name in expected: message.append( "You probably made a nesting mistake. Jinja " "is expecting this tag, but currently looking " "for %s." % currently_looking ) else: message.append( "Jinja was looking for the following tags: " "%s." % currently_looking ) if self._tag_stack: message.append( "The innermost block that needs to be " "closed is '%s'." % self._tag_stack[-1] ) self.fail(" ".join(message), lineno) def fail_unknown_tag(self, name, lineno=None): """Called if the parser encounters an unknown tag. Tries to fail with a human readable error message that could help to identify the problem. 
""" return self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof(self, end_tokens=None, lineno=None): """Like fail_unknown_tag but for end of template situations.""" stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) return self._fail_ut_eof(None, stack, lineno) def is_tuple_end(self, extra_end_rules=None): """Are we at the end of a tuple?""" if self.stream.current.type in ("variable_end", "block_end", "rparen"): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) return False def free_identifier(self, lineno=None): """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno) return rv def parse_statement(self): """Parse a single statement.""" token = self.stream.current if token.type != "name": self.fail("tag name expected", token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: return getattr(self, "parse_" + self.stream.current.value)() if token.value == "call": return self.parse_call_block() if token.value == "filter": return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) # did not work out, remove the token we pushed by accident # from the stack so that the unknown tag fail function can # produce a proper error message. self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements(self, end_tokens, drop_needle=False): """Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility self.stream.skip_if("colon") # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
self.stream.expect("block_end") result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now if self.stream.current.type == "eof": self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target(with_namespace=True) if self.stream.skip_if("assign"): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) filter_node = self.parse_filter(None) body = self.parse_statements(("name:endset",), drop_needle=True) return nodes.AssignBlock(target, filter_node, body, lineno=lineno) def parse_for(self): """Parse a for loop.""" lineno = self.stream.expect("name:for").lineno target = self.parse_assign_target(extra_end_rules=("name:in",)) self.stream.expect("name:in") iter = self.parse_tuple( with_condexpr=False, extra_end_rules=("name:recursive",) ) test = None if self.stream.skip_if("name:if"): test = self.parse_expression() recursive = self.stream.skip_if("name:recursive") body = self.parse_statements(("name:endfor", "name:else")) if next(self.stream).value == "endfor": else_ = [] else: else_ = self.parse_statements(("name:endfor",), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): """Parse an if construct.""" node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) node.elif_ = [] node.else_ = [] token = next(self.stream) if token.test("name:elif"): node = nodes.If(lineno=self.stream.current.lineno) result.elif_.append(node) continue elif token.test("name:else"): result.else_ = self.parse_statements(("name:endif",), drop_needle=True) break return result def parse_with(self): node = nodes.With(lineno=next(self.stream).lineno) targets = [] values = [] while self.stream.current.type != "block_end": if targets: self.stream.expect("comma") target = self.parse_assign_target() target.set_ctx("param") targets.append(target) self.stream.expect("assign") values.append(self.parse_expression()) node.targets = targets node.values = values node.body = self.parse_statements(("name:endwith",), drop_needle=True) return node def parse_autoescape(self): node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) node.options = [nodes.Keyword("autoescape", self.parse_expression())] node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) return nodes.Scope([node]) def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect("name").value node.scoped = self.stream.skip_if("name:scoped") # common problem people encounter when switching from django # to jinja. we do not support hyphens in block names, so let's # raise a nicer error message in that case. if self.stream.current.type == "sub": self.fail( "Block names in Jinja have to be valid Python " "identifiers and may not contain hyphens, use an " "underscore instead." 
            )

        node.body = self.parse_statements(("name:endblock",), drop_needle=True)
        self.stream.skip_if("name:" + node.name)
        return node

    def parse_extends(self):
        node = nodes.Extends(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        return node

    def parse_import_context(self, node, default):
        if self.stream.current.test_any(
            "name:with", "name:without"
        ) and self.stream.look().test("name:context"):
            node.with_context = next(self.stream).value == "with"
            self.stream.skip()
        else:
            node.with_context = default
        return node

    def parse_include(self):
        node = nodes.Include(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        if self.stream.current.test("name:ignore") and self.stream.look().test(
            "name:missing"
        ):
            node.ignore_missing = True
            self.stream.skip(2)
        else:
            node.ignore_missing = False
        return self.parse_import_context(node, True)

    def parse_import(self):
        node = nodes.Import(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect("name:as")
        node.target = self.parse_assign_target(name_only=True).name
        return self.parse_import_context(node, False)

    def parse_from(self):
        node = nodes.FromImport(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect("name:import")
        node.names = []

        def parse_context():
            if self.stream.current.value in (
                "with",
                "without",
            ) and self.stream.look().test("name:context"):
                node.with_context = next(self.stream).value == "with"
                self.stream.skip()
                return True
            return False

        while 1:
            if node.names:
                self.stream.expect("comma")
            if self.stream.current.type == "name":
                if parse_context():
                    break
                target = self.parse_assign_target(name_only=True)
                if target.name.startswith("_"):
                    self.fail(
                        "names starting with an underline can not be imported",
                        target.lineno,
                        exc=TemplateAssertionError,
                    )
                if self.stream.skip_if("name:as"):
                    alias = self.parse_assign_target(name_only=True)
                    node.names.append((target.name, alias.name))
                else:
                    node.names.append(target.name)
                if parse_context() or self.stream.current.type != "comma":
                    break
            else:
                self.stream.expect("name")
        if not hasattr(node, "with_context"):
            node.with_context = False
        return node

    def parse_signature(self, node):
        node.args = args = []
        node.defaults = defaults = []
        self.stream.expect("lparen")
        while self.stream.current.type != "rparen":
            if args:
                self.stream.expect("comma")
            arg = self.parse_assign_target(name_only=True)
            arg.set_ctx("param")
            if self.stream.skip_if("assign"):
                defaults.append(self.parse_expression())
            elif defaults:
                self.fail("non-default argument follows default argument")
            args.append(arg)
        self.stream.expect("rparen")

    def parse_call_block(self):
        node = nodes.CallBlock(lineno=next(self.stream).lineno)
        if self.stream.current.type == "lparen":
            self.parse_signature(node)
        else:
            node.args = []
            node.defaults = []

        node.call = self.parse_expression()
        if not isinstance(node.call, nodes.Call):
            self.fail("expected call", node.lineno)
        node.body = self.parse_statements(("name:endcall",), drop_needle=True)
        return node

    def parse_filter_block(self):
        node = nodes.FilterBlock(lineno=next(self.stream).lineno)
        node.filter = self.parse_filter(None, start_inline=True)
        node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
        return node

    def parse_macro(self):
        node = nodes.Macro(lineno=next(self.stream).lineno)
        node.name = self.parse_assign_target(name_only=True).name
        self.parse_signature(node)
        node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
        return node

    def parse_print(self):
        node = nodes.Output(lineno=next(self.stream).lineno)
        node.nodes = []
        while self.stream.current.type != "block_end":
            if node.nodes:
                self.stream.expect("comma")
            node.nodes.append(self.parse_expression())
        return node

    def parse_assign_target(
        self,
        with_tuple=True,
        name_only=False,
        extra_end_rules=None,
        with_namespace=False,
    ):
        """Parse an assignment target.  As Jinja allows assignments to
        tuples, this function can parse all allowed assignment targets.  Per
        default assignments to tuples are parsed, which can be disabled
        however by setting `with_tuple` to `False`.  If only assignments to
        names are wanted `name_only` can be set to `True`.  The
        `extra_end_rules` parameter is forwarded to the tuple parsing
        function.  If `with_namespace` is enabled, a namespace assignment
        may be parsed.
        """
        if with_namespace and self.stream.look().type == "dot":
            token = self.stream.expect("name")
            next(self.stream)  # dot
            attr = self.stream.expect("name")
            target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
        elif name_only:
            token = self.stream.expect("name")
            target = nodes.Name(token.value, "store", lineno=token.lineno)
        else:
            if with_tuple:
                target = self.parse_tuple(
                    simplified=True, extra_end_rules=extra_end_rules
                )
            else:
                target = self.parse_primary()

            target.set_ctx("store")

        if not target.can_assign():
            self.fail(
                "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
            )

        return target

    def parse_expression(self, with_condexpr=True):
        """Parse an expression.  Per default all expressions are parsed, if
        the optional `with_condexpr` parameter is set to `False` conditional
        expressions are not parsed.
        """
        if with_condexpr:
            return self.parse_condexpr()
        return self.parse_or()

    def parse_condexpr(self):
        lineno = self.stream.current.lineno
        expr1 = self.parse_or()
        while self.stream.skip_if("name:if"):
            expr2 = self.parse_or()
            if self.stream.skip_if("name:else"):
                expr3 = self.parse_condexpr()
            else:
                expr3 = None
            expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
            lineno = self.stream.current.lineno
        return expr1

    def parse_or(self):
        lineno = self.stream.current.lineno
        left = self.parse_and()
        while self.stream.skip_if("name:or"):
            right = self.parse_and()
            left = nodes.Or(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left

    def parse_and(self):
        lineno = self.stream.current.lineno
        left = self.parse_not()
        while self.stream.skip_if("name:and"):
            right = self.parse_not()
            left = nodes.And(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left

    def parse_not(self):
        if self.stream.current.test("name:not"):
            lineno = next(self.stream).lineno
            return nodes.Not(self.parse_not(), lineno=lineno)
        return self.parse_compare()

    def parse_compare(self):
        lineno = self.stream.current.lineno
        expr = self.parse_math1()
        ops = []
        while 1:
            token_type = self.stream.current.type
            if token_type in _compare_operators:
                next(self.stream)
                ops.append(nodes.Operand(token_type, self.parse_math1()))
            elif self.stream.skip_if("name:in"):
                ops.append(nodes.Operand("in", self.parse_math1()))
            elif self.stream.current.test("name:not") and self.stream.look().test(
                "name:in"
            ):
                self.stream.skip(2)
                ops.append(nodes.Operand("notin", self.parse_math1()))
            else:
                break
            lineno = self.stream.current.lineno
        if not ops:
            return expr
        return nodes.Compare(expr, ops, lineno=lineno)

    def parse_math1(self):
        lineno = self.stream.current.lineno
        left = self.parse_concat()
        while self.stream.current.type in ("add", "sub"):
            cls = _math_nodes[self.stream.current.type]
            next(self.stream)
            right = self.parse_concat()
            left = cls(left, right, lineno=lineno)
lineno=lineno) lineno = self.stream.current.lineno return left def parse_concat(self): lineno = self.stream.current.lineno args = [self.parse_math2()] while self.stream.current.type == "tilde": next(self.stream) args.append(self.parse_math2()) if len(args) == 1: return args[0] return nodes.Concat(args, lineno=lineno) def parse_math2(self): lineno = self.stream.current.lineno left = self.parse_pow() while self.stream.current.type in ("mul", "div", "floordiv", "mod"): cls = _math_nodes[self.stream.current.type] next(self.stream) right = self.parse_pow() left = cls(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_pow(self): lineno = self.stream.current.lineno left = self.parse_unary() while self.stream.current.type == "pow": next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_unary(self, with_filter=True): token_type = self.stream.current.type lineno = self.stream.current.lineno if token_type == "sub": next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) elif token_type == "add": next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: node = self.parse_primary() node = self.parse_postfix(node) if with_filter: node = self.parse_filter_expr(node) return node def parse_primary(self): token = self.stream.current if token.type == "name": if token.value in ("true", "false", "True", "False"): node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno) elif token.value in ("none", "None"): node = nodes.Const(None, lineno=token.lineno) else: node = nodes.Name(token.value, "load", lineno=token.lineno) next(self.stream) elif token.type == "string": next(self.stream) buf = [token.value] lineno = token.lineno while self.stream.current.type == "string": buf.append(self.stream.current.value) next(self.stream) node = nodes.Const("".join(buf), lineno=lineno) elif token.type in ("integer", "float"): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) elif token.type == "lparen": next(self.stream) node = self.parse_tuple(explicit_parentheses=True) self.stream.expect("rparen") elif token.type == "lbracket": node = self.parse_list() elif token.type == "lbrace": node = self.parse_dict() else: self.fail("unexpected '%s'" % describe_token(token), token.lineno) return node def parse_tuple( self, simplified=False, with_condexpr=True, extra_end_rules=None, explicit_parentheses=False, ): """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple if no commas where found. The default parsing mode is a full tuple. If `simplified` is `True` only names and literals are parsed. The `no_condexpr` parameter is forwarded to :meth:`parse_expression`. Because tuples do not require delimiters and may end in a bogus comma an extra hint is needed that marks the end of a tuple. For example for loops support tuples between `for` and `in`. In that case the `extra_end_rules` is set to ``['name:in']``. `explicit_parentheses` is true if the parsing was triggered by an expression in parentheses. This is used to figure out if an empty tuple is a valid expression or not. 
""" lineno = self.stream.current.lineno if simplified: parse = self.parse_primary elif with_condexpr: parse = self.parse_expression else: def parse(): return self.parse_expression(with_condexpr=False) args = [] is_tuple = False while 1: if args: self.stream.expect("comma") if self.is_tuple_end(extra_end_rules): break args.append(parse()) if self.stream.current.type == "comma": is_tuple = True else: break lineno = self.stream.current.lineno if not is_tuple: if args: return args[0] # if we don't have explicit parentheses, an empty tuple is # not a valid expression. This would mean nothing (literally # nothing) in the spot of an expression would be an empty # tuple. if not explicit_parentheses: self.fail( "Expected an expression, got '%s'" % describe_token(self.stream.current) ) return nodes.Tuple(args, "load", lineno=lineno) def parse_list(self): token = self.stream.expect("lbracket") items = [] while self.stream.current.type != "rbracket": if items: self.stream.expect("comma") if self.stream.current.type == "rbracket": break items.append(self.parse_expression()) self.stream.expect("rbracket") return nodes.List(items, lineno=token.lineno) def parse_dict(self): token = self.stream.expect("lbrace") items = [] while self.stream.current.type != "rbrace": if items: self.stream.expect("comma") if self.stream.current.type == "rbrace": break key = self.parse_expression() self.stream.expect("colon") value = self.parse_expression() items.append(nodes.Pair(key, value, lineno=key.lineno)) self.stream.expect("rbrace") return nodes.Dict(items, lineno=token.lineno) def parse_postfix(self, node): while 1: token_type = self.stream.current.type if token_type == "dot" or token_type == "lbracket": node = self.parse_subscript(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == "lparen": node = self.parse_call(node) else: break return node def parse_filter_expr(self, node): while 1: token_type = self.stream.current.type if token_type == "pipe": node = self.parse_filter(node) elif token_type == "name" and self.stream.current.value == "is": node = self.parse_test(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == "lparen": node = self.parse_call(node) else: break return node def parse_subscript(self, node): token = next(self.stream) if token.type == "dot": attr_token = self.stream.current next(self.stream) if attr_token.type == "name": return nodes.Getattr( node, attr_token.value, "load", lineno=token.lineno ) elif attr_token.type != "integer": self.fail("expected name or number", attr_token.lineno) arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) return nodes.Getitem(node, arg, "load", lineno=token.lineno) if token.type == "lbracket": args = [] while self.stream.current.type != "rbracket": if args: self.stream.expect("comma") args.append(self.parse_subscribed()) self.stream.expect("rbracket") if len(args) == 1: arg = args[0] else: arg = nodes.Tuple(args, "load", lineno=token.lineno) return nodes.Getitem(node, arg, "load", lineno=token.lineno) self.fail("expected subscript expression", token.lineno) def parse_subscribed(self): lineno = self.stream.current.lineno if self.stream.current.type == "colon": next(self.stream) args = [None] else: node = self.parse_expression() if self.stream.current.type != "colon": return node next(self.stream) args = [node] if self.stream.current.type == "colon": args.append(None) elif self.stream.current.type not in 
("rbracket", "comma"): args.append(self.parse_expression()) else: args.append(None) if self.stream.current.type == "colon": next(self.stream) if self.stream.current.type not in ("rbracket", "comma"): args.append(self.parse_expression()) else: args.append(None) else: args.append(None) return nodes.Slice(lineno=lineno, *args) def parse_call(self, node): token = self.stream.expect("lparen") args = [] kwargs = [] dyn_args = dyn_kwargs = None require_comma = False def ensure(expr): if not expr: self.fail("invalid syntax for function call expression", token.lineno) while self.stream.current.type != "rparen": if require_comma: self.stream.expect("comma") # support for trailing comma if self.stream.current.type == "rparen": break if self.stream.current.type == "mul": ensure(dyn_args is None and dyn_kwargs is None) next(self.stream) dyn_args = self.parse_expression() elif self.stream.current.type == "pow": ensure(dyn_kwargs is None) next(self.stream) dyn_kwargs = self.parse_expression() else: if ( self.stream.current.type == "name" and self.stream.look().type == "assign" ): # Parsing a kwarg ensure(dyn_kwargs is None) key = self.stream.current.value self.stream.skip(2) value = self.parse_expression() kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) else: # Parsing an arg ensure(dyn_args is None and dyn_kwargs is None and not kwargs) args.append(self.parse_expression()) require_comma = True self.stream.expect("rparen") if node is None: return args, kwargs, dyn_args, dyn_kwargs return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) def parse_filter(self, node, start_inline=False): while self.stream.current.type == "pipe" or start_inline: if not start_inline: next(self.stream) token = self.stream.expect("name") name = token.value while self.stream.current.type == "dot": next(self.stream) name += "." + self.stream.expect("name").value if self.stream.current.type == "lparen": args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) else: args = [] kwargs = [] dyn_args = dyn_kwargs = None node = nodes.Filter( node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno ) start_inline = False return node def parse_test(self, node): token = next(self.stream) if self.stream.current.test("name:not"): next(self.stream) negated = True else: negated = False name = self.stream.expect("name").value while self.stream.current.type == "dot": next(self.stream) name += "." 
+ self.stream.expect("name").value dyn_args = dyn_kwargs = None kwargs = [] if self.stream.current.type == "lparen": args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) elif self.stream.current.type in ( "name", "string", "integer", "float", "lparen", "lbracket", "lbrace", ) and not self.stream.current.test_any("name:else", "name:or", "name:and"): if self.stream.current.test("name:is"): self.fail("You cannot chain multiple tests with is") arg_node = self.parse_primary() arg_node = self.parse_postfix(arg_node) args = [arg_node] else: args = [] node = nodes.Test( node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno ) if negated: node = nodes.Not(node, lineno=token.lineno) return node def subparse(self, end_tokens=None): body = [] data_buffer = [] add_data = data_buffer.append if end_tokens is not None: self._end_token_stack.append(end_tokens) def flush_data(): if data_buffer: lineno = data_buffer[0].lineno body.append(nodes.Output(data_buffer[:], lineno=lineno)) del data_buffer[:] try: while self.stream: token = self.stream.current if token.type == "data": if token.value: add_data(nodes.TemplateData(token.value, lineno=token.lineno)) next(self.stream) elif token.type == "variable_begin": next(self.stream) add_data(self.parse_tuple(with_condexpr=True)) self.stream.expect("variable_end") elif token.type == "block_begin": flush_data() next(self.stream) if end_tokens is not None and self.stream.current.test_any( *end_tokens ): return body rv = self.parse_statement() if isinstance(rv, list): body.extend(rv) else: body.append(rv) self.stream.expect("block_end") else: raise AssertionError("internal parsing error") flush_data() finally: if end_tokens is not None: self._end_token_stack.pop() return body def parse(self): """Parse the whole template into a `Template` node.""" result = nodes.Template(self.subparse(), lineno=1) result.set_environment(self.environment) return result
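# ---------------------------------------------------------------------------
# Illustrative usage (added for this document; not part of the upstream
# module). A minimal sketch of how the Parser above is normally driven:
# Environment.parse() feeds the source through Parser.parse(), which returns
# the nodes.Template built by subparse(). Only public Jinja2 2.x API is used.
from jinja2 import Environment
from jinja2 import nodes

env = Environment()
ast = env.parse("{% for item in seq %}{{ item | upper }}{% endfor %}")

assert isinstance(ast, nodes.Template)

# find_all() walks the tree, so we can locate the Filter node that
# parse_filter() above produced for "| upper".
for filter_node in ast.find_all(nodes.Filter):
    print(filter_node.name)  # -> "upper"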
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/sandbox.py
# -*- coding: utf-8 -*- """A sandbox layer that ensures unsafe operations cannot be performed. Useful when the template itself comes from an untrusted source. """ import operator import types import warnings from collections import deque from string import Formatter from markupsafe import EscapeFormatter from markupsafe import Markup from ._compat import abc from ._compat import PY2 from ._compat import range_type from ._compat import string_types from .environment import Environment from .exceptions import SecurityError #: maximum number of items a range may produce MAX_RANGE = 100000 #: attributes of function objects that are considered unsafe. if PY2: UNSAFE_FUNCTION_ATTRIBUTES = { "func_closure", "func_code", "func_dict", "func_defaults", "func_globals", } else: # On versions > python 2 the special attributes on functions are gone, # but they remain on methods and generators for whatever reason. UNSAFE_FUNCTION_ATTRIBUTES = set() #: unsafe method attributes. function attributes are unsafe for methods too UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"} #: unsafe generator attributes. UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"} #: unsafe attributes on coroutines UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"} #: unsafe attributes on async generators UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"} # make sure we don't warn in python 2.6 about stuff we don't care about warnings.filterwarnings( "ignore", "the sets module", DeprecationWarning, module=__name__ ) _mutable_set_types = (set,) _mutable_mapping_types = (dict,) _mutable_sequence_types = (list,) # on python 2.x we can register the user collection types try: from UserDict import UserDict, DictMixin from UserList import UserList _mutable_mapping_types += (UserDict, DictMixin) _mutable_set_types += (UserList,) except ImportError: pass # if sets is still available, register the mutable set from there as well try: from sets import Set _mutable_set_types += (Set,) except ImportError: pass #: register Python 2.6 abstract base classes _mutable_set_types += (abc.MutableSet,) _mutable_mapping_types += (abc.MutableMapping,) _mutable_sequence_types += (abc.MutableSequence,) _mutable_spec = ( ( _mutable_set_types, frozenset( [ "add", "clear", "difference_update", "discard", "pop", "remove", "symmetric_difference_update", "update", ] ), ), ( _mutable_mapping_types, frozenset(["clear", "pop", "popitem", "setdefault", "update"]), ), ( _mutable_sequence_types, frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]), ), ( deque, frozenset( [ "append", "appendleft", "clear", "extend", "extendleft", "pop", "popleft", "remove", "rotate", ] ), ), ) class _MagicFormatMapping(abc.Mapping): """This class implements a dummy wrapper to fix a bug in the Python standard library for string formatting. See https://bugs.python.org/issue13598 for information about why this is necessary. 
""" def __init__(self, args, kwargs): self._args = args self._kwargs = kwargs self._last_index = 0 def __getitem__(self, key): if key == "": idx = self._last_index self._last_index += 1 try: return self._args[idx] except LookupError: pass key = str(idx) return self._kwargs[key] def __iter__(self): return iter(self._kwargs) def __len__(self): return len(self._kwargs) def inspect_format_method(callable): if not isinstance( callable, (types.MethodType, types.BuiltinMethodType) ) or callable.__name__ not in ("format", "format_map"): return None obj = callable.__self__ if isinstance(obj, string_types): return obj def safe_range(*args): """A range that can't generate ranges with a length of more than MAX_RANGE items. """ rng = range_type(*args) if len(rng) > MAX_RANGE: raise OverflowError( "Range too big. The sandbox blocks ranges larger than" " MAX_RANGE (%d)." % MAX_RANGE ) return rng def unsafe(f): """Marks a function or method as unsafe. :: @unsafe def delete(self): pass """ f.unsafe_callable = True return f def is_internal_attribute(obj, attr): """Test if the attribute given is an internal python attribute. For example this function returns `True` for the `func_code` attribute of python objects. This is useful if the environment method :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden. >>> from jinja2.sandbox import is_internal_attribute >>> is_internal_attribute(str, "mro") True >>> is_internal_attribute(str, "upper") False """ if isinstance(obj, types.FunctionType): if attr in UNSAFE_FUNCTION_ATTRIBUTES: return True elif isinstance(obj, types.MethodType): if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES: return True elif isinstance(obj, type): if attr == "mro": return True elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)): return True elif isinstance(obj, types.GeneratorType): if attr in UNSAFE_GENERATOR_ATTRIBUTES: return True elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType): if attr in UNSAFE_COROUTINE_ATTRIBUTES: return True elif hasattr(types, "AsyncGeneratorType") and isinstance( obj, types.AsyncGeneratorType ): if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES: return True return attr.startswith("__") def modifies_known_mutable(obj, attr): """This function checks if an attribute on a builtin mutable object (list, dict, set or deque) would modify it if called. It also supports the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and with Python 2.6 onwards the abstract base classes `MutableSet`, `MutableMapping`, and `MutableSequence`. >>> modifies_known_mutable({}, "clear") True >>> modifies_known_mutable({}, "keys") False >>> modifies_known_mutable([], "append") True >>> modifies_known_mutable([], "index") False If called with an unsupported object (such as unicode) `False` is returned. >>> modifies_known_mutable("foo", "upper") False """ for typespec, unsafe in _mutable_spec: if isinstance(obj, typespec): return attr in unsafe return False class SandboxedEnvironment(Environment): """The sandboxed environment. It works like the regular environment but tells the compiler to generate sandboxed code. Additionally subclasses of this environment may override the methods that tell the runtime what attributes or functions are safe to access. If the template tries to access insecure code a :exc:`SecurityError` is raised. However also other exceptions may occur during the rendering so the caller has to ensure that all exceptions are caught. 
""" sandboxed = True #: default callback table for the binary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`binop_table` default_binop_table = { "+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv, "//": operator.floordiv, "**": operator.pow, "%": operator.mod, } #: default callback table for the unary operators. A copy of this is #: available on each instance of a sandboxed environment as #: :attr:`unop_table` default_unop_table = {"+": operator.pos, "-": operator.neg} #: a set of binary operators that should be intercepted. Each operator #: that is added to this set (empty by default) is delegated to the #: :meth:`call_binop` method that will perform the operator. The default #: operator callback is specified by :attr:`binop_table`. #: #: The following binary operators are interceptable: #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**`` #: #: The default operation form the operator table corresponds to the #: builtin function. Intercepted calls are always slower than the native #: operator call, so make sure only to intercept the ones you are #: interested in. #: #: .. versionadded:: 2.6 intercepted_binops = frozenset() #: a set of unary operators that should be intercepted. Each operator #: that is added to this set (empty by default) is delegated to the #: :meth:`call_unop` method that will perform the operator. The default #: operator callback is specified by :attr:`unop_table`. #: #: The following unary operators are interceptable: ``+``, ``-`` #: #: The default operation form the operator table corresponds to the #: builtin function. Intercepted calls are always slower than the native #: operator call, so make sure only to intercept the ones you are #: interested in. #: #: .. versionadded:: 2.6 intercepted_unops = frozenset() def intercept_unop(self, operator): """Called during template compilation with the name of a unary operator to check if it should be intercepted at runtime. If this method returns `True`, :meth:`call_unop` is executed for this unary operator. The default implementation of :meth:`call_unop` will use the :attr:`unop_table` dictionary to perform the operator with the same logic as the builtin one. The following unary operators are interceptable: ``+`` and ``-`` Intercepted calls are always slower than the native operator call, so make sure only to intercept the ones you are interested in. .. versionadded:: 2.6 """ return False def __init__(self, *args, **kwargs): Environment.__init__(self, *args, **kwargs) self.globals["range"] = safe_range self.binop_table = self.default_binop_table.copy() self.unop_table = self.default_unop_table.copy() def is_safe_attribute(self, obj, attr, value): """The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function. """ return not (attr.startswith("_") or is_internal_attribute(obj, attr)) def is_safe_callable(self, obj): """Check if an object is safely callable. Per default a function is considered safe unless the `unsafe_callable` attribute exists and is True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module. 
""" return not ( getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False) ) def call_binop(self, context, operator, left, right): """For intercepted binary operator calls (:meth:`intercepted_binops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6 """ return self.binop_table[operator](left, right) def call_unop(self, context, operator, arg): """For intercepted unary operator calls (:meth:`intercepted_unops`) this function is executed instead of the builtin operator. This can be used to fine tune the behavior of certain operators. .. versionadded:: 2.6 """ return self.unop_table[operator](arg) def getitem(self, obj, argument): """Subscribe an object from sandboxed code.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, string_types): try: attr = str(argument) except Exception: pass else: try: value = getattr(obj, attr) except AttributeError: pass else: if self.is_safe_attribute(obj, argument, value): return value return self.unsafe_undefined(obj, argument) return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Subscribe an object from sandboxed code and prefer the attribute. The attribute passed *must* be a bytestring. """ try: value = getattr(obj, attribute) except AttributeError: try: return obj[attribute] except (TypeError, LookupError): pass else: if self.is_safe_attribute(obj, attribute, value): return value return self.unsafe_undefined(obj, attribute) return self.undefined(obj=obj, name=attribute) def unsafe_undefined(self, obj, attribute): """Return an undefined object for unsafe attributes.""" return self.undefined( "access to attribute %r of %r " "object is unsafe." % (attribute, obj.__class__.__name__), name=attribute, obj=obj, exc=SecurityError, ) def format_string(self, s, args, kwargs, format_func=None): """If a format call is detected, then this is routed through this method so that our safety sandbox can be used for it. """ if isinstance(s, Markup): formatter = SandboxedEscapeFormatter(self, s.escape) else: formatter = SandboxedFormatter(self) if format_func is not None and format_func.__name__ == "format_map": if len(args) != 1 or kwargs: raise TypeError( "format_map() takes exactly one argument %d given" % (len(args) + (kwargs is not None)) ) kwargs = args[0] args = None kwargs = _MagicFormatMapping(args, kwargs) rv = formatter.vformat(s, args, kwargs) return type(s)(rv) def call(__self, __context, __obj, *args, **kwargs): # noqa: B902 """Call an object from sandboxed code.""" fmt = inspect_format_method(__obj) if fmt is not None: return __self.format_string(fmt, args, kwargs, __obj) # the double prefixes are to avoid double keyword argument # errors when proxying the call. if not __self.is_safe_callable(__obj): raise SecurityError("%r is not safely callable" % (__obj,)) return __context.call(__obj, *args, **kwargs) class ImmutableSandboxedEnvironment(SandboxedEnvironment): """Works exactly like the regular `SandboxedEnvironment` but does not permit modifications on the builtin mutable objects `list`, `set`, and `dict` by using the :func:`modifies_known_mutable` function. """ def is_safe_attribute(self, obj, attr, value): if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value): return False return not modifies_known_mutable(obj, attr) # This really is not a public API apparently. 
try: from _string import formatter_field_name_split except ImportError: def formatter_field_name_split(field_name): return field_name._formatter_field_name_split() class SandboxedFormatterMixin(object): def __init__(self, env): self._env = env def get_field(self, field_name, args, kwargs): first, rest = formatter_field_name_split(field_name) obj = self.get_value(first, args, kwargs) for is_attr, i in rest: if is_attr: obj = self._env.getattr(obj, i) else: obj = self._env.getitem(obj, i) return obj, first class SandboxedFormatter(SandboxedFormatterMixin, Formatter): def __init__(self, env): SandboxedFormatterMixin.__init__(self, env) Formatter.__init__(self) class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter): def __init__(self, env, escape): SandboxedFormatterMixin.__init__(self, env) EscapeFormatter.__init__(self, escape)
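# ---------------------------------------------------------------------------
# Illustrative usage (added for this document; not part of the upstream
# module). A minimal sketch of the sandbox blocking a mutating call: getattr()
# above consults is_safe_attribute(), which in ImmutableSandboxedEnvironment
# also checks modifies_known_mutable(), so "append" resolves to an unsafe
# undefined and using it raises SecurityError.
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.exceptions import SecurityError

env = ImmutableSandboxedEnvironment()
template = env.from_string("{{ seq.append(4) }}")

try:
    template.render(seq=[1, 2, 3])
except SecurityError as exc:
    # message comes from unsafe_undefined():
    # "access to attribute 'append' of 'list' object is unsafe."
    print("blocked:", exc)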
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/visitor.py
# -*- coding: utf-8 -*- """API for traversing the AST nodes. Implemented by the compiler and meta introspection. """ from .nodes import Node class NodeVisitor(object): """Walks the abstract syntax tree and call visitor functions for every node found. The visitor functions may return values which will be forwarded by the `visit` method. Per default the visitor functions for the nodes are ``'visit_'`` + class name of the node. So a `TryFinally` node visit function would be `visit_TryFinally`. This behavior can be changed by overriding the `get_visitor` function. If no visitor function exists for a node (return value `None`) the `generic_visit` visitor is used instead. """ def get_visitor(self, node): """Return the visitor function for this node or `None` if no visitor exists for this node. In that case the generic visit function is used instead. """ method = "visit_" + node.__class__.__name__ return getattr(self, method, None) def visit(self, node, *args, **kwargs): """Visit a node.""" f = self.get_visitor(node) if f is not None: return f(node, *args, **kwargs) return self.generic_visit(node, *args, **kwargs) def generic_visit(self, node, *args, **kwargs): """Called if no explicit visitor function exists for a node.""" for node in node.iter_child_nodes(): self.visit(node, *args, **kwargs) class NodeTransformer(NodeVisitor): """Walks the abstract syntax tree and allows modifications of nodes. The `NodeTransformer` will walk the AST and use the return value of the visitor functions to replace or remove the old node. If the return value of the visitor function is `None` the node will be removed from the previous location otherwise it's replaced with the return value. The return value may be the original node in which case no replacement takes place. """ def generic_visit(self, node, *args, **kwargs): for field, old_value in node.iter_fields(): if isinstance(old_value, list): new_values = [] for value in old_value: if isinstance(value, Node): value = self.visit(value, *args, **kwargs) if value is None: continue elif not isinstance(value, Node): new_values.extend(value) continue new_values.append(value) old_value[:] = new_values elif isinstance(old_value, Node): new_node = self.visit(old_value, *args, **kwargs) if new_node is None: delattr(node, field) else: setattr(node, field, new_node) return node def visit_list(self, node, *args, **kwargs): """As transformers may return lists in some places this method can be used to enforce a list as return value. """ rv = self.visit(node, *args, **kwargs) if not isinstance(rv, list): rv = [rv] return rv
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/utils.py
# -*- coding: utf-8 -*- import json import os import re import warnings from collections import deque from random import choice from random import randrange from threading import Lock from markupsafe import escape from markupsafe import Markup from ._compat import abc from ._compat import string_types from ._compat import text_type from ._compat import url_quote _word_split_re = re.compile(r"(\s+)") _punctuation_re = re.compile( "^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$" % ( "|".join(map(re.escape, ("(", "<", "&lt;"))), "|".join(map(re.escape, (".", ",", ")", ">", "\n", "&gt;"))), ) ) _simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$") _striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)") _entity_re = re.compile(r"&([^;]+);") _letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" _digits = "0123456789" # special singleton representing missing values for the runtime missing = type("MissingType", (), {"__repr__": lambda x: "missing"})() # internal code internal_code = set() concat = u"".join _slash_escape = "\\/" not in json.dumps("/") def contextfunction(f): """This decorator can be used to mark a function or method context callable. A context callable is passed the active :class:`Context` as first argument when called from the template. This is useful if a function wants to get access to the context or functions provided on the context object. For example a function that returns a sorted list of template variables the current template exports could look like this:: @contextfunction def get_exported_names(context): return sorted(context.exported_vars) """ f.contextfunction = True return f def evalcontextfunction(f): """This decorator can be used to mark a function or method as an eval context callable. This is similar to the :func:`contextfunction` but instead of passing the context, an evaluation context object is passed. For more information about the eval context, see :ref:`eval-context`. .. versionadded:: 2.4 """ f.evalcontextfunction = True return f def environmentfunction(f): """This decorator can be used to mark a function or method as environment callable. This decorator works exactly like the :func:`contextfunction` decorator just that the first argument is the active :class:`Environment` and not context. """ f.environmentfunction = True return f def internalcode(f): """Marks the function as internally used""" internal_code.add(f.__code__) return f def is_undefined(obj): """Check if the object passed is undefined. This does nothing more than performing an instance check against :class:`Undefined` but looks nicer. This can be used for custom filters or tests that want to react to undefined variables. For example a custom default filter can look like this:: def default(var, default=''): if is_undefined(var): return default return var """ from .runtime import Undefined return isinstance(obj, Undefined) def consume(iterable): """Consumes an iterable without doing anything with it.""" for _ in iterable: pass def clear_caches(): """Jinja keeps internal caches for environments and lexers. These are used so that Jinja doesn't have to recreate environments and lexers all the time. Normally you don't have to care about that but if you are measuring memory consumption you may want to clean the caches. """ from .environment import _spontaneous_environments from .lexer import _lexer_cache _spontaneous_environments.clear() _lexer_cache.clear() def import_string(import_name, silent=False): """Imports an object based on a string. 
This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). If the `silent` is True the return value will be `None` if the import fails. :return: imported object """ try: if ":" in import_name: module, obj = import_name.split(":", 1) elif "." in import_name: module, _, obj = import_name.rpartition(".") else: return __import__(import_name) return getattr(__import__(module, None, None, [obj]), obj) except (ImportError, AttributeError): if not silent: raise def open_if_exists(filename, mode="rb"): """Returns a file descriptor for the filename if that file exists, otherwise ``None``. """ if not os.path.isfile(filename): return None return open(filename, mode) def object_type_repr(obj): """Returns the name of the object's type. For some recognized singletons the name of the object is returned instead. (For example for `None` and `Ellipsis`). """ if obj is None: return "None" elif obj is Ellipsis: return "Ellipsis" cls = type(obj) # __builtin__ in 2.x, builtins in 3.x if cls.__module__ in ("__builtin__", "builtins"): name = cls.__name__ else: name = cls.__module__ + "." + cls.__name__ return "%s object" % name def pformat(obj, verbose=False): """Prettyprint an object. Either use the `pretty` library or the builtin `pprint`. """ try: from pretty import pretty return pretty(obj, verbose=verbose) except ImportError: from pprint import pformat return pformat(obj) def urlize(text, trim_url_limit=None, rel=None, target=None): """Converts any URLs in text into clickable links. Works on http://, https:// and www. links. Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, the URLs in link text will be limited to trim_url_limit characters. If nofollow is True, the URLs in link text will get a rel="nofollow" attribute. If target is not None, a target attribute will be added to the link. """ trim_url = ( lambda x, limit=trim_url_limit: limit is not None and (x[:limit] + (len(x) >= limit and "..." 
or "")) or x ) words = _word_split_re.split(text_type(escape(text))) rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or "" target_attr = target and ' target="%s"' % escape(target) or "" for i, word in enumerate(words): match = _punctuation_re.match(word) if match: lead, middle, trail = match.groups() if middle.startswith("www.") or ( "@" not in middle and not middle.startswith("http://") and not middle.startswith("https://") and len(middle) > 0 and middle[0] in _letters + _digits and ( middle.endswith(".org") or middle.endswith(".net") or middle.endswith(".com") ) ): middle = '<a href="http://%s"%s%s>%s</a>' % ( middle, rel_attr, target_attr, trim_url(middle), ) if middle.startswith("http://") or middle.startswith("https://"): middle = '<a href="%s"%s%s>%s</a>' % ( middle, rel_attr, target_attr, trim_url(middle), ) if ( "@" in middle and not middle.startswith("www.") and ":" not in middle and _simple_email_re.match(middle) ): middle = '<a href="mailto:%s">%s</a>' % (middle, middle) if lead + middle + trail != word: words[i] = lead + middle + trail return u"".join(words) def generate_lorem_ipsum(n=5, html=True, min=20, max=100): """Generate some lorem ipsum for the template.""" from .constants import LOREM_IPSUM_WORDS words = LOREM_IPSUM_WORDS.split() result = [] for _ in range(n): next_capitalized = True last_comma = last_fullstop = 0 word = None last = None p = [] # each paragraph contains out of 20 to 100 words. for idx, _ in enumerate(range(randrange(min, max))): while True: word = choice(words) if word != last: last = word break if next_capitalized: word = word.capitalize() next_capitalized = False # add commas if idx - randrange(3, 8) > last_comma: last_comma = idx last_fullstop += 2 word += "," # add end of sentences if idx - randrange(10, 20) > last_fullstop: last_comma = last_fullstop = idx word += "." next_capitalized = True p.append(word) # ensure that the paragraph ends with a dot. p = u" ".join(p) if p.endswith(","): p = p[:-1] + "." elif not p.endswith("."): p += "." result.append(p) if not html: return u"\n\n".join(result) return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result)) def unicode_urlencode(obj, charset="utf-8", for_qs=False): """Quote a string for use in a URL using the given charset. This function is misnamed, it is a wrapper around :func:`urllib.parse.quote`. :param obj: String or bytes to quote. Other types are converted to string then encoded to bytes using the given charset. :param charset: Encode text to bytes using this charset. :param for_qs: Quote "/" and use "+" for spaces. """ if not isinstance(obj, string_types): obj = text_type(obj) if isinstance(obj, text_type): obj = obj.encode(charset) safe = b"" if for_qs else b"/" rv = url_quote(obj, safe) if not isinstance(rv, text_type): rv = rv.decode("utf-8") if for_qs: rv = rv.replace("%20", "+") return rv class LRUCache(object): """A simple LRU Cache implementation.""" # this is fast for small capacities (something below 1000) but doesn't # scale. But as long as it's only used as storage for templates this # won't do any harm. 
def __init__(self, capacity): self.capacity = capacity self._mapping = {} self._queue = deque() self._postinit() def _postinit(self): # alias all queue methods for faster lookup self._popleft = self._queue.popleft self._pop = self._queue.pop self._remove = self._queue.remove self._wlock = Lock() self._append = self._queue.append def __getstate__(self): return { "capacity": self.capacity, "_mapping": self._mapping, "_queue": self._queue, } def __setstate__(self, d): self.__dict__.update(d) self._postinit() def __getnewargs__(self): return (self.capacity,) def copy(self): """Return a shallow copy of the instance.""" rv = self.__class__(self.capacity) rv._mapping.update(self._mapping) rv._queue.extend(self._queue) return rv def get(self, key, default=None): """Return an item from the cache dict or `default`""" try: return self[key] except KeyError: return default def setdefault(self, key, default=None): """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ try: return self[key] except KeyError: self[key] = default return default def clear(self): """Clear the cache.""" self._wlock.acquire() try: self._mapping.clear() self._queue.clear() finally: self._wlock.release() def __contains__(self, key): """Check if a key exists in this cache.""" return key in self._mapping def __len__(self): """Return the current size of the cache.""" return len(self._mapping) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self._mapping) def __getitem__(self, key): """Get an item from the cache. Moves the item up so that it has the highest priority then. Raise a `KeyError` if it does not exist. """ self._wlock.acquire() try: rv = self._mapping[key] if self._queue[-1] != key: try: self._remove(key) except ValueError: # if something removed the key from the container # when we read, ignore the ValueError that we would # get otherwise. pass self._append(key) return rv finally: self._wlock.release() def __setitem__(self, key, value): """Sets the value for an item. Moves the item up so that it has the highest priority then. """ self._wlock.acquire() try: if key in self._mapping: self._remove(key) elif len(self._mapping) == self.capacity: del self._mapping[self._popleft()] self._append(key) self._mapping[key] = value finally: self._wlock.release() def __delitem__(self, key): """Remove an item from the cache dict. Raise a `KeyError` if it does not exist. """ self._wlock.acquire() try: del self._mapping[key] try: self._remove(key) except ValueError: pass finally: self._wlock.release() def items(self): """Return a list of items.""" result = [(key, self._mapping[key]) for key in list(self._queue)] result.reverse() return result def iteritems(self): """Iterate over all items.""" warnings.warn( "'iteritems()' will be removed in version 3.0. Use" " 'iter(cache.items())' instead.", DeprecationWarning, stacklevel=2, ) return iter(self.items()) def values(self): """Return a list of all values.""" return [x[1] for x in self.items()] def itervalue(self): """Iterate over all values.""" warnings.warn( "'itervalue()' will be removed in version 3.0. Use" " 'iter(cache.values())' instead.", DeprecationWarning, stacklevel=2, ) return iter(self.values()) def itervalues(self): """Iterate over all values.""" warnings.warn( "'itervalues()' will be removed in version 3.0. 
Use" " 'iter(cache.values())' instead.", DeprecationWarning, stacklevel=2, ) return iter(self.values()) def keys(self): """Return a list of all keys ordered by most recent usage.""" return list(self) def iterkeys(self): """Iterate over all keys in the cache dict, ordered by the most recent usage. """ warnings.warn( "'iterkeys()' will be removed in version 3.0. Use" " 'iter(cache.keys())' instead.", DeprecationWarning, stacklevel=2, ) return iter(self) def __iter__(self): return reversed(tuple(self._queue)) def __reversed__(self): """Iterate over the keys in the cache dict, oldest items coming first. """ return iter(tuple(self._queue)) __copy__ = copy abc.MutableMapping.register(LRUCache) def select_autoescape( enabled_extensions=("html", "htm", "xml"), disabled_extensions=(), default_for_string=True, default=False, ): """Intelligently sets the initial value of autoescaping based on the filename of the template. This is the recommended way to configure autoescaping if you do not want to write a custom function yourself. If you want to enable it for all templates created from strings or for all templates with `.html` and `.xml` extensions:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( enabled_extensions=('html', 'xml'), default_for_string=True, )) Example configuration to turn it on at all times except if the template ends with `.txt`:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( disabled_extensions=('txt',), default_for_string=True, default=True, )) The `enabled_extensions` is an iterable of all the extensions that autoescaping should be enabled for. Likewise `disabled_extensions` is a list of all templates it should be disabled for. If a template is loaded from a string then the default from `default_for_string` is used. If nothing matches then the initial value of autoescaping is set to the value of `default`. For security reasons this function operates case insensitive. .. versionadded:: 2.9 """ enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions) disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions) def autoescape(template_name): if template_name is None: return default_for_string template_name = template_name.lower() if template_name.endswith(enabled_patterns): return True if template_name.endswith(disabled_patterns): return False return default return autoescape def htmlsafe_json_dumps(obj, dumper=None, **kwargs): """Works exactly like :func:`dumps` but is safe for use in ``<script>`` tags. It accepts the same arguments and returns a JSON string. Note that this is available in templates through the ``|tojson`` filter which will also mark the result as safe. Due to how this function escapes certain characters this is safe even if used outside of ``<script>`` tags. The following characters are escaped in strings: - ``<`` - ``>`` - ``&`` - ``'`` This makes it safe to embed such strings in any place in HTML with the notable exception of double quoted attributes. In that case single quote your attributes or HTML escape it in addition. """ if dumper is None: dumper = json.dumps rv = ( dumper(obj, **kwargs) .replace(u"<", u"\\u003c") .replace(u">", u"\\u003e") .replace(u"&", u"\\u0026") .replace(u"'", u"\\u0027") ) return Markup(rv) class Cycler(object): """Cycle through values by yield them one at a time, then restarting once the end is reached. Available as ``cycler`` in templates. 
Similar to ``loop.cycle``, but can be used outside loops or across multiple loops. For example, render a list of folders and files in a list, alternating giving them "odd" and "even" classes. .. code-block:: html+jinja {% set row_class = cycler("odd", "even") %} <ul class="browser"> {% for folder in folders %} <li class="folder {{ row_class.next() }}">{{ folder }} {% endfor %} {% for file in files %} <li class="file {{ row_class.next() }}">{{ file }} {% endfor %} </ul> :param items: Each positional argument will be yielded in the order given for each cycle. .. versionadded:: 2.1 """ def __init__(self, *items): if not items: raise RuntimeError("at least one item has to be provided") self.items = items self.pos = 0 def reset(self): """Resets the current item to the first item.""" self.pos = 0 @property def current(self): """Return the current item. Equivalent to the item that will be returned next time :meth:`next` is called. """ return self.items[self.pos] def next(self): """Return the current item, then advance :attr:`current` to the next item. """ rv = self.current self.pos = (self.pos + 1) % len(self.items) return rv __next__ = next class Joiner(object): """A joining helper for templates.""" def __init__(self, sep=u", "): self.sep = sep self.used = False def __call__(self): if not self.used: self.used = True return u"" return self.sep class Namespace(object): """A namespace object that can hold arbitrary attributes. It may be initialized from a dictionary or with keyword arguments.""" def __init__(*args, **kwargs): # noqa: B902 self, args = args[0], args[1:] self.__attrs = dict(*args, **kwargs) def __getattribute__(self, name): # __class__ is needed for the awaitable check in async mode if name in {"_Namespace__attrs", "__class__"}: return object.__getattribute__(self, name) try: return self.__attrs[name] except KeyError: raise AttributeError(name) def __setitem__(self, name, value): self.__attrs[name] = value def __repr__(self): return "<Namespace %r>" % self.__attrs # does this python version support async for in and async generators? try: exec("async def _():\n async for _ in ():\n yield _") have_async_gen = True except SyntaxError: have_async_gen = False def soft_unicode(s): from markupsafe import soft_unicode warnings.warn( "'jinja2.utils.soft_unicode' will be removed in version 3.0." " Use 'markupsafe.soft_unicode' instead.", DeprecationWarning, stacklevel=2, ) return soft_unicode(s)
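# ---------------------------------------------------------------------------
# Illustrative usage (added for this document; not part of the upstream
# module). A minimal sketch of the LRUCache defined above, which Jinja uses
# internally for template and lexer caching: reads move a key to the
# most-recently-used end, and inserting past capacity evicts the oldest key.
from jinja2.utils import LRUCache

cache = LRUCache(2)
cache["a"] = 1
cache["b"] = 2
cache["a"]       # touch "a" so "b" becomes the least recently used key
cache["c"] = 3   # capacity is 2, so "b" is evicted via _popleft()
print("b" in cache)  # -> False
print(cache.keys())  # -> ['c', 'a'] (most recent first)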
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/debug.py
import sys from types import CodeType from . import TemplateSyntaxError from ._compat import PYPY from .utils import internal_code from .utils import missing def rewrite_traceback_stack(source=None): """Rewrite the current exception to replace any tracebacks from within compiled template code with tracebacks that look like they came from the template source. This must be called within an ``except`` block. :param exc_info: A :meth:`sys.exc_info` tuple. If not provided, the current ``exc_info`` is used. :param source: For ``TemplateSyntaxError``, the original source if known. :return: A :meth:`sys.exc_info` tuple that can be re-raised. """ exc_type, exc_value, tb = sys.exc_info() if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: exc_value.translated = True exc_value.source = source try: # Remove the old traceback on Python 3, otherwise the frames # from the compiler still show up. exc_value.with_traceback(None) except AttributeError: pass # Outside of runtime, so the frame isn't executing template # code, but it still needs to point at the template. tb = fake_traceback( exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno ) else: # Skip the frame for the render function. tb = tb.tb_next stack = [] # Build the stack of traceback object, replacing any in template # code with the source file and line information. while tb is not None: # Skip frames decorated with @internalcode. These are internal # calls that aren't useful in template debugging output. if tb.tb_frame.f_code in internal_code: tb = tb.tb_next continue template = tb.tb_frame.f_globals.get("__jinja_template__") if template is not None: lineno = template.get_corresponding_lineno(tb.tb_lineno) fake_tb = fake_traceback(exc_value, tb, template.filename, lineno) stack.append(fake_tb) else: stack.append(tb) tb = tb.tb_next tb_next = None # Assign tb_next in reverse to avoid circular references. for tb in reversed(stack): tb_next = tb_set_next(tb, tb_next) return exc_type, exc_value, tb_next def fake_traceback(exc_value, tb, filename, lineno): """Produce a new traceback object that looks like it came from the template source instead of the compiled code. The filename, line number, and location name will point to the template, and the local variables will be the current template context. :param exc_value: The original exception to be re-raised to create the new traceback. :param tb: The original traceback to get the local variables and code info from. :param filename: The template filename. :param lineno: The line number in the template source. """ if tb is not None: # Replace the real locals with the context that would be # available at that point in the template. locals = get_template_locals(tb.tb_frame.f_locals) locals.pop("__jinja_exception__", None) else: locals = {} globals = { "__name__": filename, "__file__": filename, "__jinja_exception__": exc_value, } # Raise an exception at the correct line number. code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec") # Build a new code object that points to the template file and # replaces the location with a block name. try: location = "template" if tb is not None: function = tb.tb_frame.f_code.co_name if function == "root": location = "top-level template code" elif function.startswith("block_"): location = 'block "%s"' % function[6:] # Collect arguments for the new code object. CodeType only # accepts positional arguments, and arguments were inserted in # new Python versions. 
code_args = [] for attr in ( "argcount", "posonlyargcount", # Python 3.8 "kwonlyargcount", # Python 3 "nlocals", "stacksize", "flags", "code", # codestring "consts", # constants "names", "varnames", ("filename", filename), ("name", location), "firstlineno", "lnotab", "freevars", "cellvars", ): if isinstance(attr, tuple): # Replace with given value. code_args.append(attr[1]) continue try: # Copy original value if it exists. code_args.append(getattr(code, "co_" + attr)) except AttributeError: # Some arguments were added later. continue code = CodeType(*code_args) except Exception: # Some environments such as Google App Engine don't support # modifying code objects. pass # Execute the new code, which is guaranteed to raise, and return # the new traceback without this frame. try: exec(code, globals, locals) except BaseException: return sys.exc_info()[2].tb_next def get_template_locals(real_locals): """Based on the runtime locals, get the context that would be available at that point in the template. """ # Start with the current template context. ctx = real_locals.get("context") if ctx: data = ctx.get_all().copy() else: data = {} # Might be in a derived context that only sets local variables # rather than pushing a context. Local variables follow the scheme # l_depth_name. Find the highest-depth local that has a value for # each name. local_overrides = {} for name, value in real_locals.items(): if not name.startswith("l_") or value is missing: # Not a template variable, or no longer relevant. continue try: _, depth, name = name.split("_", 2) depth = int(depth) except ValueError: continue cur_depth = local_overrides.get(name, (-1,))[0] if cur_depth < depth: local_overrides[name] = (depth, value) # Modify the context with any derived context. for name, (_, value) in local_overrides.items(): if value is missing: data.pop(name, None) else: data[name] = value return data if sys.version_info >= (3, 7): # tb_next is directly assignable as of Python 3.7 def tb_set_next(tb, tb_next): tb.tb_next = tb_next return tb elif PYPY: # PyPy might have special support, and won't work with ctypes. try: import tputil except ImportError: # Without tproxy support, use the original traceback. def tb_set_next(tb, tb_next): return tb else: # With tproxy support, create a proxy around the traceback that # returns the new tb_next. def tb_set_next(tb, tb_next): def controller(op): if op.opname == "__getattribute__" and op.args[0] == "tb_next": return tb_next return op.delegate() return tputil.make_proxy(controller, obj=tb) else: # Use ctypes to assign tb_next at the C level since it's read-only # from Python. import ctypes class _CTraceback(ctypes.Structure): _fields_ = [ # Extra PyObject slots when compiled with Py_TRACE_REFS. ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()), # Only care about tb_next as an object, not a traceback. ("tb_next", ctypes.py_object), ] def tb_set_next(tb, tb_next): c_tb = _CTraceback.from_address(id(tb)) # Clear out the old tb_next. if tb.tb_next is not None: c_tb_next = ctypes.py_object(tb.tb_next) c_tb.tb_next = ctypes.py_object() ctypes.pythonapi.Py_DecRef(c_tb_next) # Assign the new tb_next. if tb_next is not None: c_tb_next = ctypes.py_object(tb_next) ctypes.pythonapi.Py_IncRef(c_tb_next) c_tb.tb_next = c_tb_next return tb
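# ---------------------------------------------------------------------------
# Illustrative usage (added for this document; not part of the upstream
# module). A minimal sketch of the traceback rewriting above: when rendering
# fails, rewrite_traceback_stack() replaces compiled-code frames with fake
# frames that point at the template source, so the printed traceback shows
# '<template>', line 2 rather than the generated Python module.
from jinja2 import Environment
from jinja2.exceptions import UndefinedError

env = Environment()
template = env.from_string("line one\n{{ missing.attr }}")

try:
    template.render()
except UndefinedError:
    import traceback

    # expected frame: File "<template>", line 2, in top-level template code
    traceback.print_exc()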
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/lexer.py
# -*- coding: utf-8 -*- """Implements a Jinja / Python combination lexer. The ``Lexer`` class is used to do some preprocessing. It filters out invalid operators like the bitshift operators we don't allow in templates. It separates template code and python code in expressions. """ import re from ast import literal_eval from collections import deque from operator import itemgetter from ._compat import implements_iterator from ._compat import intern from ._compat import iteritems from ._compat import text_type from .exceptions import TemplateSyntaxError from .utils import LRUCache # cache for the lexers. Exists in order to be able to have multiple # environments with the same lexer _lexer_cache = LRUCache(50) # static regular expressions whitespace_re = re.compile(r"\s+", re.U) newline_re = re.compile(r"(\r\n|\r|\n)") string_re = re.compile( r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S ) integer_re = re.compile(r"(\d+_)*\d+") float_re = re.compile( r""" (?<!\.) # doesn't start with a . (\d+_)*\d+ # digits, possibly _ separated ( (\.(\d+_)*\d+)? # optional fractional part e[+\-]?(\d+_)*\d+ # exponent part | \.(\d+_)*\d+ # required fractional part ) """, re.IGNORECASE | re.VERBOSE, ) try: # check if this Python supports Unicode identifiers compile("föö", "<unknown>", "eval") except SyntaxError: # Python 2, no Unicode support, use ASCII identifiers name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*") check_ident = False else: # Unicode support, import generated re pattern and set flag to use # str.isidentifier to validate during lexing. from ._identifier import pattern as name_re check_ident = True # internal the tokens and keep references to them TOKEN_ADD = intern("add") TOKEN_ASSIGN = intern("assign") TOKEN_COLON = intern("colon") TOKEN_COMMA = intern("comma") TOKEN_DIV = intern("div") TOKEN_DOT = intern("dot") TOKEN_EQ = intern("eq") TOKEN_FLOORDIV = intern("floordiv") TOKEN_GT = intern("gt") TOKEN_GTEQ = intern("gteq") TOKEN_LBRACE = intern("lbrace") TOKEN_LBRACKET = intern("lbracket") TOKEN_LPAREN = intern("lparen") TOKEN_LT = intern("lt") TOKEN_LTEQ = intern("lteq") TOKEN_MOD = intern("mod") TOKEN_MUL = intern("mul") TOKEN_NE = intern("ne") TOKEN_PIPE = intern("pipe") TOKEN_POW = intern("pow") TOKEN_RBRACE = intern("rbrace") TOKEN_RBRACKET = intern("rbracket") TOKEN_RPAREN = intern("rparen") TOKEN_SEMICOLON = intern("semicolon") TOKEN_SUB = intern("sub") TOKEN_TILDE = intern("tilde") TOKEN_WHITESPACE = intern("whitespace") TOKEN_FLOAT = intern("float") TOKEN_INTEGER = intern("integer") TOKEN_NAME = intern("name") TOKEN_STRING = intern("string") TOKEN_OPERATOR = intern("operator") TOKEN_BLOCK_BEGIN = intern("block_begin") TOKEN_BLOCK_END = intern("block_end") TOKEN_VARIABLE_BEGIN = intern("variable_begin") TOKEN_VARIABLE_END = intern("variable_end") TOKEN_RAW_BEGIN = intern("raw_begin") TOKEN_RAW_END = intern("raw_end") TOKEN_COMMENT_BEGIN = intern("comment_begin") TOKEN_COMMENT_END = intern("comment_end") TOKEN_COMMENT = intern("comment") TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin") TOKEN_LINESTATEMENT_END = intern("linestatement_end") TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin") TOKEN_LINECOMMENT_END = intern("linecomment_end") TOKEN_LINECOMMENT = intern("linecomment") TOKEN_DATA = intern("data") TOKEN_INITIAL = intern("initial") TOKEN_EOF = intern("eof") # bind operators to token types operators = { "+": TOKEN_ADD, "-": TOKEN_SUB, "/": TOKEN_DIV, "//": TOKEN_FLOORDIV, "*": TOKEN_MUL, "%": TOKEN_MOD, "**": TOKEN_POW, "~": TOKEN_TILDE, "[": 
TOKEN_LBRACKET, "]": TOKEN_RBRACKET, "(": TOKEN_LPAREN, ")": TOKEN_RPAREN, "{": TOKEN_LBRACE, "}": TOKEN_RBRACE, "==": TOKEN_EQ, "!=": TOKEN_NE, ">": TOKEN_GT, ">=": TOKEN_GTEQ, "<": TOKEN_LT, "<=": TOKEN_LTEQ, "=": TOKEN_ASSIGN, ".": TOKEN_DOT, ":": TOKEN_COLON, "|": TOKEN_PIPE, ",": TOKEN_COMMA, ";": TOKEN_SEMICOLON, } reverse_operators = dict([(v, k) for k, v in iteritems(operators)]) assert len(operators) == len(reverse_operators), "operators dropped" operator_re = re.compile( "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x))) ) ignored_tokens = frozenset( [ TOKEN_COMMENT_BEGIN, TOKEN_COMMENT, TOKEN_COMMENT_END, TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT, ] ) ignore_if_empty = frozenset( [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT] ) def _describe_token_type(token_type): if token_type in reverse_operators: return reverse_operators[token_type] return { TOKEN_COMMENT_BEGIN: "begin of comment", TOKEN_COMMENT_END: "end of comment", TOKEN_COMMENT: "comment", TOKEN_LINECOMMENT: "comment", TOKEN_BLOCK_BEGIN: "begin of statement block", TOKEN_BLOCK_END: "end of statement block", TOKEN_VARIABLE_BEGIN: "begin of print statement", TOKEN_VARIABLE_END: "end of print statement", TOKEN_LINESTATEMENT_BEGIN: "begin of line statement", TOKEN_LINESTATEMENT_END: "end of line statement", TOKEN_DATA: "template data / text", TOKEN_EOF: "end of template", }.get(token_type, token_type) def describe_token(token): """Returns a description of the token.""" if token.type == TOKEN_NAME: return token.value return _describe_token_type(token.type) def describe_token_expr(expr): """Like `describe_token` but for token expressions.""" if ":" in expr: type, value = expr.split(":", 1) if type == TOKEN_NAME: return value else: type = expr return _describe_token_type(type) def count_newlines(value): """Count the number of newline characters in the string. This is useful for extensions that filter a stream. """ return len(newline_re.findall(value)) def compile_rules(environment): """Compiles all the rules from the environment into a list of rules.""" e = re.escape rules = [ ( len(environment.comment_start_string), TOKEN_COMMENT_BEGIN, e(environment.comment_start_string), ), ( len(environment.block_start_string), TOKEN_BLOCK_BEGIN, e(environment.block_start_string), ), ( len(environment.variable_start_string), TOKEN_VARIABLE_BEGIN, e(environment.variable_start_string), ), ] if environment.line_statement_prefix is not None: rules.append( ( len(environment.line_statement_prefix), TOKEN_LINESTATEMENT_BEGIN, r"^[ \t\v]*" + e(environment.line_statement_prefix), ) ) if environment.line_comment_prefix is not None: rules.append( ( len(environment.line_comment_prefix), TOKEN_LINECOMMENT_BEGIN, r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix), ) ) return [x[1:] for x in sorted(rules, reverse=True)] class Failure(object): """Class that raises a `TemplateSyntaxError` if called. Used by the `Lexer` to specify known errors. 
""" def __init__(self, message, cls=TemplateSyntaxError): self.message = message self.error_class = cls def __call__(self, lineno, filename): raise self.error_class(self.message, lineno, filename) class Token(tuple): """Token class.""" __slots__ = () lineno, type, value = (property(itemgetter(x)) for x in range(3)) def __new__(cls, lineno, type, value): return tuple.__new__(cls, (lineno, intern(str(type)), value)) def __str__(self): if self.type in reverse_operators: return reverse_operators[self.type] elif self.type == "name": return self.value return self.type def test(self, expr): """Test a token against a token expression. This can either be a token type or ``'token_type:token_value'``. This can only test against string values and types. """ # here we do a regular string equality check as test_any is usually # passed an iterable of not interned strings. if self.type == expr: return True elif ":" in expr: return expr.split(":", 1) == [self.type, self.value] return False def test_any(self, *iterable): """Test against multiple token expressions.""" for expr in iterable: if self.test(expr): return True return False def __repr__(self): return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value) @implements_iterator class TokenStreamIterator(object): """The iterator for tokenstreams. Iterate over the stream until the eof token is reached. """ def __init__(self, stream): self.stream = stream def __iter__(self): return self def __next__(self): token = self.stream.current if token.type is TOKEN_EOF: self.stream.close() raise StopIteration() next(self.stream) return token @implements_iterator class TokenStream(object): """A token stream is an iterable that yields :class:`Token`\\s. The parser however does not iterate over it but calls :meth:`next` to go one token ahead. The current active token is stored as :attr:`current`. """ def __init__(self, generator, name, filename): self._iter = iter(generator) self._pushed = deque() self.name = name self.filename = filename self.closed = False self.current = Token(1, TOKEN_INITIAL, "") next(self) def __iter__(self): return TokenStreamIterator(self) def __bool__(self): return bool(self._pushed) or self.current.type is not TOKEN_EOF __nonzero__ = __bool__ # py2 @property def eos(self): """Are we at the end of the stream?""" return not self def push(self, token): """Push a token back to the stream.""" self._pushed.append(token) def look(self): """Look at the next token.""" old_token = next(self) result = self.current self.push(result) self.current = old_token return result def skip(self, n=1): """Got n tokens ahead.""" for _ in range(n): next(self) def next_if(self, expr): """Perform the token test and return the token if it matched. Otherwise the return value is `None`. """ if self.current.test(expr): return next(self) def skip_if(self, expr): """Like :meth:`next_if` but only returns `True` or `False`.""" return self.next_if(expr) is not None def __next__(self): """Go one token ahead and return the old one. Use the built-in :func:`next` instead of calling this directly. """ rv = self.current if self._pushed: self.current = self._pushed.popleft() elif self.current.type is not TOKEN_EOF: try: self.current = next(self._iter) except StopIteration: self.close() return rv def close(self): """Close the stream.""" self.current = Token(self.current.lineno, TOKEN_EOF, "") self._iter = None self.closed = True def expect(self, expr): """Expect a given token type and return it. This accepts the same argument as :meth:`jinja2.lexer.Token.test`. 
""" if not self.current.test(expr): expr = describe_token_expr(expr) if self.current.type is TOKEN_EOF: raise TemplateSyntaxError( "unexpected end of template, expected %r." % expr, self.current.lineno, self.name, self.filename, ) raise TemplateSyntaxError( "expected token %r, got %r" % (expr, describe_token(self.current)), self.current.lineno, self.name, self.filename, ) try: return self.current finally: next(self) def get_lexer(environment): """Return a lexer which is probably cached.""" key = ( environment.block_start_string, environment.block_end_string, environment.variable_start_string, environment.variable_end_string, environment.comment_start_string, environment.comment_end_string, environment.line_statement_prefix, environment.line_comment_prefix, environment.trim_blocks, environment.lstrip_blocks, environment.newline_sequence, environment.keep_trailing_newline, ) lexer = _lexer_cache.get(key) if lexer is None: lexer = Lexer(environment) _lexer_cache[key] = lexer return lexer class OptionalLStrip(tuple): """A special tuple for marking a point in the state that can have lstrip applied. """ __slots__ = () # Even though it looks like a no-op, creating instances fails # without this. def __new__(cls, *members, **kwargs): return super(OptionalLStrip, cls).__new__(cls, members) class Lexer(object): """Class that implements a lexer for a given environment. Automatically created by the environment class, usually you don't have to do that. Note that the lexer is not automatically bound to an environment. Multiple environments can share the same lexer. """ def __init__(self, environment): # shortcuts e = re.escape def c(x): return re.compile(x, re.M | re.S) # lexing rules for tags tag_rules = [ (whitespace_re, TOKEN_WHITESPACE, None), (float_re, TOKEN_FLOAT, None), (integer_re, TOKEN_INTEGER, None), (name_re, TOKEN_NAME, None), (string_re, TOKEN_STRING, None), (operator_re, TOKEN_OPERATOR, None), ] # assemble the root lexing rule. because "|" is ungreedy # we have to sort by length so that the lexer continues working # as expected when we have parsing rules like <% for block and # <%= for variables. (if someone wants asp like syntax) # variables are just part of the rules if variable processing # is required. root_tag_rules = compile_rules(environment) # block suffix if trimming is enabled block_suffix_re = environment.trim_blocks and "\\n?" or "" # If lstrip is enabled, it should not be applied if there is any # non-whitespace between the newline and block. 
self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None self.newline_sequence = environment.newline_sequence self.keep_trailing_newline = environment.keep_trailing_newline # global lexing rules self.rules = { "root": [ # directives ( c( "(.*?)(?:%s)" % "|".join( [ r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))" % ( e(environment.block_start_string), e(environment.block_end_string), e(environment.block_end_string), ) ] + [ r"(?P<%s>%s(\-|\+|))" % (n, r) for n, r in root_tag_rules ] ) ), OptionalLStrip(TOKEN_DATA, "#bygroup"), "#bygroup", ), # data (c(".+"), TOKEN_DATA, None), ], # comments TOKEN_COMMENT_BEGIN: [ ( c( r"(.*?)((?:\-%s\s*|%s)%s)" % ( e(environment.comment_end_string), e(environment.comment_end_string), block_suffix_re, ) ), (TOKEN_COMMENT, TOKEN_COMMENT_END), "#pop", ), (c("(.)"), (Failure("Missing end of comment tag"),), None), ], # blocks TOKEN_BLOCK_BEGIN: [ ( c( r"(?:\-%s\s*|%s)%s" % ( e(environment.block_end_string), e(environment.block_end_string), block_suffix_re, ) ), TOKEN_BLOCK_END, "#pop", ), ] + tag_rules, # variables TOKEN_VARIABLE_BEGIN: [ ( c( r"\-%s\s*|%s" % ( e(environment.variable_end_string), e(environment.variable_end_string), ) ), TOKEN_VARIABLE_END, "#pop", ) ] + tag_rules, # raw block TOKEN_RAW_BEGIN: [ ( c( r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))" % ( e(environment.block_start_string), e(environment.block_end_string), e(environment.block_end_string), block_suffix_re, ) ), OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), "#pop", ), (c("(.)"), (Failure("Missing end of raw directive"),), None), ], # line statements TOKEN_LINESTATEMENT_BEGIN: [ (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop") ] + tag_rules, # line comments TOKEN_LINECOMMENT_BEGIN: [ ( c(r"(.*?)()(?=\n|$)"), (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END), "#pop", ) ], } def _normalize_newlines(self, value): """Called for strings and template data to normalize it to unicode.""" return newline_re.sub(self.newline_sequence, value) def tokenize(self, source, name=None, filename=None, state=None): """Calls tokeniter + tokenize and wraps it in a token stream.""" stream = self.tokeniter(source, name, filename, state) return TokenStream(self.wrap(stream, name, filename), name, filename) def wrap(self, stream, name=None, filename=None): """This is called with the stream as returned by `tokenize` and wraps every token in a :class:`Token` and converts the value. 
""" for lineno, token, value in stream: if token in ignored_tokens: continue elif token == TOKEN_LINESTATEMENT_BEGIN: token = TOKEN_BLOCK_BEGIN elif token == TOKEN_LINESTATEMENT_END: token = TOKEN_BLOCK_END # we are not interested in those tokens in the parser elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END): continue elif token == TOKEN_DATA: value = self._normalize_newlines(value) elif token == "keyword": token = value elif token == TOKEN_NAME: value = str(value) if check_ident and not value.isidentifier(): raise TemplateSyntaxError( "Invalid character in identifier", lineno, name, filename ) elif token == TOKEN_STRING: # try to unescape string try: value = ( self._normalize_newlines(value[1:-1]) .encode("ascii", "backslashreplace") .decode("unicode-escape") ) except Exception as e: msg = str(e).split(":")[-1].strip() raise TemplateSyntaxError(msg, lineno, name, filename) elif token == TOKEN_INTEGER: value = int(value.replace("_", "")) elif token == TOKEN_FLOAT: # remove all "_" first to support more Python versions value = literal_eval(value.replace("_", "")) elif token == TOKEN_OPERATOR: token = operators[value] yield Token(lineno, token, value) def tokeniter(self, source, name, filename=None, state=None): """This method tokenizes the text and returns the tokens in a generator. Use this method if you just want to tokenize a template. """ source = text_type(source) lines = source.splitlines() if self.keep_trailing_newline and source: for newline in ("\r\n", "\r", "\n"): if source.endswith(newline): lines.append("") break source = "\n".join(lines) pos = 0 lineno = 1 stack = ["root"] if state is not None and state != "root": assert state in ("variable", "block"), "invalid state" stack.append(state + "_begin") statetokens = self.rules[stack[-1]] source_length = len(source) balancing_stack = [] lstrip_unless_re = self.lstrip_unless_re newlines_stripped = 0 line_starting = True while 1: # tokenizer loop for regex, tokens, new_state in statetokens: m = regex.match(source, pos) # if no match we try again with the next rule if m is None: continue # we only match blocks and variables if braces / parentheses # are balanced. continue parsing with the lower rule which # is the operator rule. do this only if the end tags look # like operators if balancing_stack and tokens in ( TOKEN_VARIABLE_END, TOKEN_BLOCK_END, TOKEN_LINESTATEMENT_END, ): continue # tuples support more options if isinstance(tokens, tuple): groups = m.groups() if isinstance(tokens, OptionalLStrip): # Rule supports lstrip. Match will look like # text, block type, whitespace control, type, control, ... text = groups[0] # Skipping the text and first type, every other group is the # whitespace control for each type. One of the groups will be # -, +, or empty string instead of None. strip_sign = next(g for g in groups[2::2] if g is not None) if strip_sign == "-": # Strip all whitespace between the text and the tag. stripped = text.rstrip() newlines_stripped = text[len(stripped) :].count("\n") groups = (stripped,) + groups[1:] elif ( # Not marked for preserving whitespace. strip_sign != "+" # lstrip is enabled. and lstrip_unless_re is not None # Not a variable expression. and not m.groupdict().get(TOKEN_VARIABLE_BEGIN) ): # The start of text between the last newline and the tag. l_pos = text.rfind("\n") + 1 if l_pos > 0 or line_starting: # If there's only whitespace between the newline and the # tag, strip it. 
if not lstrip_unless_re.search(text, l_pos): groups = (text[:l_pos],) + groups[1:] for idx, token in enumerate(tokens): # failure group if token.__class__ is Failure: raise token(lineno, filename) # bygroup is a bit more complex, in that case we # yield for the current token the first named # group that matched elif token == "#bygroup": for key, value in iteritems(m.groupdict()): if value is not None: yield lineno, key, value lineno += value.count("\n") break else: raise RuntimeError( "%r wanted to resolve " "the token dynamically" " but no group matched" % regex ) # normal group else: data = groups[idx] if data or token not in ignore_if_empty: yield lineno, token, data lineno += data.count("\n") + newlines_stripped newlines_stripped = 0 # strings as token just are yielded as it. else: data = m.group() # update brace/parentheses balance if tokens == TOKEN_OPERATOR: if data == "{": balancing_stack.append("}") elif data == "(": balancing_stack.append(")") elif data == "[": balancing_stack.append("]") elif data in ("}", ")", "]"): if not balancing_stack: raise TemplateSyntaxError( "unexpected '%s'" % data, lineno, name, filename ) expected_op = balancing_stack.pop() if expected_op != data: raise TemplateSyntaxError( "unexpected '%s', " "expected '%s'" % (data, expected_op), lineno, name, filename, ) # yield items if data or tokens not in ignore_if_empty: yield lineno, tokens, data lineno += data.count("\n") line_starting = m.group()[-1:] == "\n" # fetch new position into new variable so that we can check # if there is a internal parsing error which would result # in an infinite loop pos2 = m.end() # handle state changes if new_state is not None: # remove the uppermost state if new_state == "#pop": stack.pop() # resolve the new state by group checking elif new_state == "#bygroup": for key, value in iteritems(m.groupdict()): if value is not None: stack.append(key) break else: raise RuntimeError( "%r wanted to resolve the " "new state dynamically but" " no group matched" % regex ) # direct state name given else: stack.append(new_state) statetokens = self.rules[stack[-1]] # we are still at the same position and no stack change. # this means a loop without break condition, avoid that and # raise error elif pos2 == pos: raise RuntimeError( "%r yielded empty string without stack change" % regex ) # publish new function and start again pos = pos2 break # if loop terminated without break we haven't found a single match # either we are at the end of the file or we have a problem else: # end of text if pos >= source_length: return # something went wrong raise TemplateSyntaxError( "unexpected char %r at %d" % (source[pos], pos), lineno, name, filename, )
0
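A minimal sketch (not part of the dumped sources) exercising the lexer above. Environment.lex(), defined in environment.py below, drives Lexer.tokeniter() and yields (lineno, token_type, value) tuples; the template string here is illustrative only.

from jinja2 import Environment

env = Environment()
# lex() returns the raw tokeniter() stream, so whitespace tokens inside
# {{ ... }} are still present -- the ignored_tokens filtering happens in
# wrap(), which lex() deliberately skips.
for lineno, token_type, value in env.lex("Hello {{ name }}!"):
    print(lineno, token_type, repr(value))
# Token types seen, in order: data, variable_begin, whitespace, name,
# whitespace, variable_end, data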
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/environment.py
# -*- coding: utf-8 -*- """Classes for managing templates and their runtime and compile time options. """ import os import sys import weakref from functools import partial from functools import reduce from markupsafe import Markup from . import nodes from ._compat import encode_filename from ._compat import implements_iterator from ._compat import implements_to_string from ._compat import iteritems from ._compat import PY2 from ._compat import PYPY from ._compat import reraise from ._compat import string_types from ._compat import text_type from .compiler import CodeGenerator from .compiler import generate from .defaults import BLOCK_END_STRING from .defaults import BLOCK_START_STRING from .defaults import COMMENT_END_STRING from .defaults import COMMENT_START_STRING from .defaults import DEFAULT_FILTERS from .defaults import DEFAULT_NAMESPACE from .defaults import DEFAULT_POLICIES from .defaults import DEFAULT_TESTS from .defaults import KEEP_TRAILING_NEWLINE from .defaults import LINE_COMMENT_PREFIX from .defaults import LINE_STATEMENT_PREFIX from .defaults import LSTRIP_BLOCKS from .defaults import NEWLINE_SEQUENCE from .defaults import TRIM_BLOCKS from .defaults import VARIABLE_END_STRING from .defaults import VARIABLE_START_STRING from .exceptions import TemplateNotFound from .exceptions import TemplateRuntimeError from .exceptions import TemplatesNotFound from .exceptions import TemplateSyntaxError from .exceptions import UndefinedError from .lexer import get_lexer from .lexer import TokenStream from .nodes import EvalContext from .parser import Parser from .runtime import Context from .runtime import new_context from .runtime import Undefined from .utils import concat from .utils import consume from .utils import have_async_gen from .utils import import_string from .utils import internalcode from .utils import LRUCache from .utils import missing # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) def get_spontaneous_environment(cls, *args): """Return a new spontaneous environment. A spontaneous environment is used for templates created directly rather than through an existing environment. :param cls: Environment class to create. :param args: Positional arguments passed to environment. """ key = (cls, args) try: return _spontaneous_environments[key] except KeyError: _spontaneous_environments[key] = env = cls(*args) env.shared = True return env def create_cache(size): """Return the cache class for the given size.""" if size == 0: return None if size < 0: return {} return LRUCache(size) def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity) def load_extensions(environment, extensions): """Load the extensions from the list and bind it to the environment. Returns a dict of instantiated environments. 
""" result = {} for extension in extensions: if isinstance(extension, string_types): extension = import_string(extension) result[extension.identifier] = extension(environment) return result def fail_for_missing_callable(string, name): msg = string % name if isinstance(name, Undefined): try: name._fail_with_undefined_error() except Exception as e: msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e) raise TemplateRuntimeError(msg) def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" assert issubclass( environment.undefined, Undefined ), "undefined must be a subclass of undefined because filters depend on it." assert ( environment.block_start_string != environment.variable_start_string != environment.comment_start_string ), "block, variable and comment start strings must be different" assert environment.newline_sequence in ( "\r", "\r\n", "\n", ), "newline_sequence set to unknown line ending string." return environment class Environment(object): r"""The core component of Jinja is the `Environment`. It contains important shared variables like configuration, filters, tests, globals and others. Instances of this class may be modified if they are not shared and if no template was loaded so far. Modifications on environments after the first template was loaded will lead to surprising effects and undefined behavior. Here are the possible initialization parameters: `block_start_string` The string marking the beginning of a block. Defaults to ``'{%'``. `block_end_string` The string marking the end of a block. Defaults to ``'%}'``. `variable_start_string` The string marking the beginning of a print statement. Defaults to ``'{{'``. `variable_end_string` The string marking the end of a print statement. Defaults to ``'}}'``. `comment_start_string` The string marking the beginning of a comment. Defaults to ``'{#'``. `comment_end_string` The string marking the end of a comment. Defaults to ``'#}'``. `line_statement_prefix` If given and a string, this will be used as prefix for line based statements. See also :ref:`line-statements`. `line_comment_prefix` If given and a string, this will be used as prefix for line based comments. See also :ref:`line-statements`. .. versionadded:: 2.2 `trim_blocks` If this is set to ``True`` the first newline after a block is removed (block, not variable tag!). Defaults to `False`. `lstrip_blocks` If this is set to ``True`` leading spaces and tabs are stripped from the start of a line to a block. Defaults to `False`. `newline_sequence` The sequence that starts a newline. Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a useful default for Linux and OS X systems as well as web applications. `keep_trailing_newline` Preserve the trailing newline when rendering templates. The default is ``False``, which causes a single newline, if present, to be stripped from the end of the template. .. versionadded:: 2.7 `extensions` List of Jinja extensions to use. This can either be import paths as strings or extension classes. For more information have a look at :ref:`the extensions documentation <jinja-extensions>`. `optimized` should the optimizer be enabled? Default is ``True``. `undefined` :class:`Undefined` or a subclass of it that is used to represent undefined values in the template. `finalize` A callable that can be used to process the result of a variable expression before it is output. For example one can convert ``None`` implicitly into an empty string here. 
`autoescape` If set to ``True`` the XML/HTML autoescaping feature is enabled by default. For more details about autoescaping see :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return ``True`` or ``False`` depending on autoescape should be enabled by default. .. versionchanged:: 2.4 `autoescape` can now be a function `loader` The template loader for this environment. `cache_size` The size of the cache. Per default this is ``400`` which means that if more than 400 templates are loaded the loader will clean out the least recently used template. If the cache size is set to ``0`` templates are recompiled all the time, if the cache size is ``-1`` the cache will not be cleaned. .. versionchanged:: 2.8 The cache size was increased to 400 from a low 50. `auto_reload` Some loaders load templates from locations where the template sources may change (ie: file system or database). If ``auto_reload`` is set to ``True`` (default) every time a template is requested the loader checks if the source changed and if yes, it will reload the template. For higher performance it's possible to disable that. `bytecode_cache` If set to a bytecode cache object, this object will provide a cache for the internal Jinja bytecode so that templates don't have to be parsed if they were not changed. See :ref:`bytecode-cache` for more information. `enable_async` If set to true this enables async template execution which allows you to take advantage of newer Python features. This requires Python 3.6 or later. """ #: if this environment is sandboxed. Modifying this variable won't make #: the environment sandboxed though. For a real sandboxed environment #: have a look at jinja2.sandbox. This flag alone controls the code #: generation by the compiler. sandboxed = False #: True if the environment is just an overlay overlayed = False #: the environment this environment is linked to if it is an overlay linked_to = None #: shared environments have this set to `True`. A shared environment #: must not be modified shared = False #: the class that is used for code generation. See #: :class:`~jinja2.compiler.CodeGenerator` for more information. code_generator_class = CodeGenerator #: the context class thatis used for templates. See #: :class:`~jinja2.runtime.Context` for more information. context_class = Context def __init__( self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=400, auto_reload=True, bytecode_cache=None, enable_async=False, ): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to # not change the order of arguments because it's used at least # internally in those cases: # - spontaneous environments (i18n extension and Template) # - unittests # If parameter changes are required only add parameters at the end # and don't change the arguments (or the defaults!) of the arguments # existing already. 
# lexer / parser information self.block_start_string = block_start_string self.block_end_string = block_end_string self.variable_start_string = variable_start_string self.variable_end_string = variable_end_string self.comment_start_string = comment_start_string self.comment_end_string = comment_end_string self.line_statement_prefix = line_statement_prefix self.line_comment_prefix = line_comment_prefix self.trim_blocks = trim_blocks self.lstrip_blocks = lstrip_blocks self.newline_sequence = newline_sequence self.keep_trailing_newline = keep_trailing_newline # runtime information self.undefined = undefined self.optimized = optimized self.finalize = finalize self.autoescape = autoescape # defaults self.filters = DEFAULT_FILTERS.copy() self.tests = DEFAULT_TESTS.copy() self.globals = DEFAULT_NAMESPACE.copy() # set the loader provided self.loader = loader self.cache = create_cache(cache_size) self.bytecode_cache = bytecode_cache self.auto_reload = auto_reload # configurable policies self.policies = DEFAULT_POLICIES.copy() # load extensions self.extensions = load_extensions(self, extensions) self.enable_async = enable_async self.is_async = self.enable_async and have_async_gen if self.is_async: # runs patch_all() to enable async support from . import asyncsupport # noqa: F401 _environment_sanity_check(self) def add_extension(self, extension): """Adds an extension after the environment was created. .. versionadded:: 2.5 """ self.extensions.update(load_extensions(self, [extension])) def extend(self, **attributes): """Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance. """ for key, value in iteritems(attributes): if not hasattr(self, key): setattr(self, key, value) def overlay( self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, lstrip_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing, ): """Create a new overlay environment that shares all the data with the current environment except for cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed environment automatically gets all the extensions of the environment it is linked to plus optional extra extensions. Creating overlays should happen after the initial environment was set up completely. Not all attributes are truly linked, some are just copied over so modifications on the original environment may not shine through. 
""" args = dict(locals()) del args["self"], args["cache_size"], args["extensions"] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.overlayed = True rv.linked_to = self for key, value in iteritems(args): if value is not missing: setattr(rv, key, value) if cache_size is not missing: rv.cache = create_cache(cache_size) else: rv.cache = copy_cache(self.cache) rv.extensions = {} for key, value in iteritems(self.extensions): rv.extensions[key] = value.bind(rv) if extensions is not missing: rv.extensions.update(load_extensions(rv, extensions)) return _environment_sanity_check(rv) lexer = property(get_lexer, doc="The lexer for this environment.") def iter_extensions(self): """Iterates over the extensions by priority.""" return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" try: return obj[argument] except (AttributeError, TypeError, LookupError): if isinstance(argument, string_types): try: attr = str(argument) except Exception: pass else: try: return getattr(obj, attr) except AttributeError: pass return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) def call_filter( self, name, value, args=None, kwargs=None, context=None, eval_ctx=None ): """Invokes a filter on a value the same way the compiler does it. Note that on Python 3 this might return a coroutine in case the filter is running from an environment in async mode and the filter supports async execution. It's your responsibility to await this if needed. .. versionadded:: 2.7 """ func = self.filters.get(name) if func is None: fail_for_missing_callable("no filter named %r", name) args = [value] + list(args or ()) if getattr(func, "contextfilter", False) is True: if context is None: raise TemplateRuntimeError( "Attempted to invoke context filter without context" ) args.insert(0, context) elif getattr(func, "evalcontextfilter", False) is True: if eval_ctx is None: if context is not None: eval_ctx = context.eval_ctx else: eval_ctx = EvalContext(self) args.insert(0, eval_ctx) elif getattr(func, "environmentfilter", False) is True: args.insert(0, self) return func(*args, **(kwargs or {})) def call_test(self, name, value, args=None, kwargs=None): """Invokes a test on a value the same way the compiler does it. .. versionadded:: 2.7 """ func = self.tests.get(name) if func is None: fail_for_missing_callable("no test named %r", name) return func(value, *(args or ()), **(kwargs or {})) @internalcode def parse(self, source, name=None, filename=None): """Parse the sourcecode and return the abstract syntax tree. This tree of nodes is used by the compiler to convert the template into executable source- or bytecode. This is useful for debugging or to extract information from templates. If you are :ref:`developing Jinja extensions <writing-extensions>` this gives you a good overview of the node tree generated. 
""" try: return self._parse(source, name, filename) except TemplateSyntaxError: self.handle_exception(source=source) def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, encode_filename(filename)).parse() def lex(self, source, name=None, filename=None): """Lex the given sourcecode and return a generator that yields tokens as tuples in the form ``(lineno, token_type, value)``. This can be useful for :ref:`extension development <writing-extensions>` and debugging templates. This does not perform preprocessing. If you want the preprocessing of the extensions to be applied you have to filter source through the :meth:`preprocess` method. """ source = text_type(source) try: return self.lexer.tokeniter(source, name, filename) except TemplateSyntaxError: self.handle_exception(source=source) def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ return reduce( lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), text_type(source), ) def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. """ source = self.preprocess(source, name, filename) stream = self.lexer.tokenize(source, name, filename, state) for ext in self.iter_extensions(): stream = ext.filter_stream(stream) if not isinstance(stream, TokenStream): stream = TokenStream(stream, name, filename) return stream def _generate(self, source, name, filename, defer_init=False): """Internal hook that can be overridden to hook a different generate method in. .. versionadded:: 2.5 """ return generate( source, self, name, filename, defer_init=defer_init, optimized=self.optimized, ) def _compile(self, source, filename): """Internal hook that can be overridden to hook a different compile method in. .. versionadded:: 2.5 """ return compile(source, filename, "exec") @internalcode def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. 
""" source_hint = None try: if isinstance(source, string_types): source_hint = source source = self._parse(source, name, filename) source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = "<template>" else: filename = encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: self.handle_exception(source=source_hint) def compile_expression(self, source, undefined_to_none=True): """A handy helper method that returns a callable that accepts keyword arguments that appear as variables in the expression. If called it returns the result of the expression. This is useful if applications want to use the same rules as Jinja in template "configuration files" or similar situations. Example usage: >>> env = Environment() >>> expr = env.compile_expression('foo == 42') >>> expr(foo=23) False >>> expr(foo=42) True Per default the return value is converted to `None` if the expression returns an undefined value. This can be changed by setting `undefined_to_none` to `False`. >>> env.compile_expression('var')() is None True >>> env.compile_expression('var', undefined_to_none=False)() Undefined .. versionadded:: 2.1 """ parser = Parser(self, source, state="variable") try: expr = parser.parse_expression() if not parser.stream.eos: raise TemplateSyntaxError( "chunk after expression", parser.stream.current.lineno, None, None ) expr.set_environment(self) except TemplateSyntaxError: if sys.exc_info() is not None: self.handle_exception(source=source) body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)] template = self.from_string(nodes.Template(body, lineno=1)) return TemplateExpression(template, undefined_to_none) def compile_templates( self, target, extensions=None, filter_func=None, zip="deflated", log_function=None, ignore_errors=True, py_compile=False, ): """Finds all the templates the loader can find, compiles them and stores them in `target`. If `zip` is `None`, instead of in a zipfile, the templates will be stored in a directory. By default a deflate zip algorithm is used. To switch to the stored algorithm, `zip` can be set to ``'stored'``. `extensions` and `filter_func` are passed to :meth:`list_templates`. Each template returned will be compiled to the target folder or zipfile. By default template compilation errors are ignored. In case a log function is provided, errors are logged. If you want template syntax errors to abort the compilation you can set `ignore_errors` to `False` and you will get an exception on syntax errors. If `py_compile` is set to `True` .pyc files will be written to the target instead of standard .py files. This flag does not do anything on pypy and Python 3 where pyc files are not picked up by itself and don't give much benefit. .. 
versionadded:: 2.4 """ from .loaders import ModuleLoader if log_function is None: def log_function(x): pass if py_compile: if not PY2 or PYPY: import warnings warnings.warn( "'py_compile=True' has no effect on PyPy or Python" " 3 and will be removed in version 3.0", DeprecationWarning, stacklevel=2, ) py_compile = False else: import imp import marshal py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15") # Python 3.3 added a source filesize to the header if sys.version_info >= (3, 3): py_header += u"\x00\x00\x00\x00".encode("iso-8859-15") def write_file(filename, data): if zip: info = ZipInfo(filename) info.external_attr = 0o755 << 16 zip_file.writestr(info, data) else: if isinstance(data, text_type): data = data.encode("utf8") with open(os.path.join(target, filename), "wb") as f: f.write(data) if zip is not None: from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED zip_file = ZipFile( target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip] ) log_function('Compiling into Zip archive "%s"' % target) else: if not os.path.isdir(target): os.makedirs(target) log_function('Compiling into folder "%s"' % target) try: for name in self.list_templates(extensions, filter_func): source, filename, _ = self.loader.get_source(self, name) try: code = self.compile(source, name, filename, True, True) except TemplateSyntaxError as e: if not ignore_errors: raise log_function('Could not compile "%s": %s' % (name, e)) continue filename = ModuleLoader.get_module_filename(name) if py_compile: c = self._compile(code, encode_filename(filename)) write_file(filename + "c", py_header + marshal.dumps(c)) log_function('Byte-compiled "%s" as %s' % (name, filename + "c")) else: write_file(filename, code) log_function('Compiled "%s" as %s' % (name, filename)) finally: if zip: zip_file.close() log_function("Finished compiling templates") def list_templates(self, extensions=None, filter_func=None): """Returns a list of templates for this environment. This requires that the loader supports the loader's :meth:`~BaseLoader.list_templates` method. If there are other files in the template folder besides the actual templates, the returned list can be filtered. There are two ways: either `extensions` is set to a list of file extensions for templates, or a `filter_func` can be provided which is a callable that is passed a template name and should return `True` if it should end up in the result list. If the loader does not support that, a :exc:`TypeError` is raised. .. versionadded:: 2.4 """ names = self.loader.list_templates() if extensions is not None: if filter_func is not None: raise TypeError( "either extensions or filter_func can be passed, but not both" ) def filter_func(x): return "." in x and x.rsplit(".", 1)[1] in extensions if filter_func is not None: names = [name for name in names if filter_func(name)] return names def handle_exception(self, source=None): """Exception handling helper. This is used internally to either raise rewritten exceptions or return a rendered traceback for the template. """ from .debug import rewrite_traceback_stack reraise(*rewrite_traceback_stack(source=source)) def join_path(self, template, parent): """Join a template with the parent. By default all the lookups are relative to the loader root so this method returns the `template` parameter unchanged, but if the paths should be relative to the parent template, this function can be used to calculate the real template name. Subclasses may override this method and implement template path joining here. 
""" return template @internalcode def _load_template(self, name, globals): if self.loader is None: raise TypeError("no loader for this environment specified") cache_key = (weakref.ref(self.loader), name) if self.cache is not None: template = self.cache.get(cache_key) if template is not None and ( not self.auto_reload or template.is_up_to_date ): return template template = self.loader.load(self, name, globals) if self.cache is not None: self.cache[cache_key] = template return template @internalcode def get_template(self, name, parent=None, globals=None): """Load a template from the loader. If a loader is configured this method asks the loader for the template and returns a :class:`Template`. If the `parent` parameter is not `None`, :meth:`join_path` is called to get the real template name before loading. The `globals` parameter can be used to provide template wide globals. These variables are available in the context at render time. If the template does not exist a :exc:`TemplateNotFound` exception is raised. .. versionchanged:: 2.4 If `name` is a :class:`Template` object it is returned from the function unchanged. """ if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) return self._load_template(name, self.make_globals(globals)) @internalcode def select_template(self, names, parent=None, globals=None): """Works like :meth:`get_template` but tries a number of templates before it fails. If it cannot find any of the templates, it will raise a :exc:`TemplatesNotFound` exception. .. versionchanged:: 2.11 If names is :class:`Undefined`, an :exc:`UndefinedError` is raised instead. If no templates were found and names contains :class:`Undefined`, the message is more helpful. .. versionchanged:: 2.4 If `names` contains a :class:`Template` object it is returned from the function unchanged. .. versionadded:: 2.3 """ if isinstance(names, Undefined): names._fail_with_undefined_error() if not names: raise TemplatesNotFound( message=u"Tried to select from an empty list " u"of templates." ) globals = self.make_globals(globals) for name in names: if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) try: return self._load_template(name, globals) except (TemplateNotFound, UndefinedError): pass raise TemplatesNotFound(names) @internalcode def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ if isinstance(template_name_or_list, (string_types, Undefined)): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list return self.select_template(template_name_or_list, parent, globals) def from_string(self, source, globals=None, template_class=None): """Load a template from a string. This parses the source given and returns a :class:`Template` object. """ globals = self.make_globals(globals) cls = template_class or self.template_class return cls.from_code(self, self.compile(source), globals, None) def make_globals(self, d): """Return a dict for the globals.""" if not d: return self.globals return dict(self.globals, **d) class Template(object): """The central template object. This class represents a compiled template and is used to evaluate it. 
Normally the template object is generated from an :class:`Environment` but it also has a constructor that makes it possible to create a template instance directly using the constructor. It takes the same arguments as the environment constructor but it's not possible to specify a loader. Every template object has a few methods and members that are guaranteed to exist. However it's important that a template object should be considered immutable. Modifications on the object are not supported. Template objects created from the constructor rather than an environment do have an `environment` attribute that points to a temporary environment that is probably shared with other templates created with the constructor and compatible settings. >>> template = Template('Hello {{ name }}!') >>> template.render(name='John Doe') == u'Hello John Doe!' True >>> stream = template.stream(name='John Doe') >>> next(stream) == u'Hello John Doe!' True >>> next(stream) Traceback (most recent call last): ... StopIteration """ #: Type of environment to create when creating a template directly #: rather than through an existing environment. environment_class = Environment def __new__( cls, source, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, enable_async=False, ): env = get_spontaneous_environment( cls.environment_class, block_start_string, block_end_string, variable_start_string, variable_end_string, comment_start_string, comment_end_string, line_statement_prefix, line_comment_prefix, trim_blocks, lstrip_blocks, newline_sequence, keep_trailing_newline, frozenset(extensions), optimized, undefined, finalize, autoescape, None, 0, False, None, enable_async, ) return env.from_string(source, template_class=cls) @classmethod def from_code(cls, environment, code, globals, uptodate=None): """Creates a template object from compiled code and the globals. This is used by the loaders and environment to create a template object. """ namespace = {"environment": environment, "__file__": code.co_filename} exec(code, namespace) rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate return rv @classmethod def from_module_dict(cls, environment, module_dict, globals): """Creates a template object from a module. This is used by the module loader to create a template object. .. versionadded:: 2.4 """ return cls._from_namespace(environment, module_dict, globals) @classmethod def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals t.name = namespace["name"] t.filename = namespace["__file__"] t.blocks = namespace["blocks"] # render function and module t.root_render_func = namespace["root"] t._module = None # debug and loader helpers t._debug_info = namespace["debug_info"] t._uptodate = None # store the reference namespace["environment"] = environment namespace["__jinja_template__"] = t return t def render(self, *args, **kwargs): """This method accepts the same arguments as the `dict` constructor: A dict, a dict subclass or some keyword arguments. 
If no arguments are given the context will be empty. These two calls do the same:: template.render(knights='that say nih') template.render({'knights': 'that say nih'}) This will return the rendered template as unicode string. """ vars = dict(*args, **kwargs) try: return concat(self.root_render_func(self.new_context(vars))) except Exception: self.environment.handle_exception() def render_async(self, *args, **kwargs): """This works similar to :meth:`render` but returns a coroutine that when awaited returns the entire rendered template string. This requires the async feature to be enabled. Example usage:: await template.render_async(knights='that say nih; asynchronously') """ # see asyncsupport for the actual implementation raise NotImplementedError( "This feature is not available for this version of Python" ) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a :class:`TemplateStream`. """ return TemplateStream(self.generate(*args, **kwargs)) def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`. """ vars = dict(*args, **kwargs) try: for event in self.root_render_func(self.new_context(vars)): yield event except Exception: yield self.environment.handle_exception() def generate_async(self, *args, **kwargs): """An async version of :meth:`generate`. Works very similarly but returns an async iterator instead. """ # see asyncsupport for the actual implementation raise NotImplementedError( "This feature is not available for this version of Python" ) def new_context(self, vars=None, shared=False, locals=None): """Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data is passed as is to the context without adding the globals. `locals` can be a dict of local variables for internal usage. """ return new_context( self.environment, self.name, self.blocks, vars, shared, self.globals, locals ) def make_module(self, vars=None, shared=False, locals=None): """This method works like the :attr:`module` attribute when called without arguments but it will evaluate the template on every call rather than caching it. It's also possible to provide a dict which is then used as context. The arguments are the same as for the :meth:`new_context` method. """ return TemplateModule(self, self.new_context(vars, shared, locals)) def make_module_async(self, vars=None, shared=False, locals=None): """As template module creation can invoke template code for asynchronous executions this method must be used instead of the normal :meth:`make_module` one. Likewise the module attribute becomes unavailable in async mode. """ # see asyncsupport for the actual implementation raise NotImplementedError( "This feature is not available for this version of Python" ) @internalcode def _get_default_module(self): if self._module is not None: return self._module self._module = rv = self.make_module() return rv @property def module(self): """The template as module. 
This is used for imports in the template runtime but is also useful if one wants to access exported template variables from the Python layer: >>> t = Template('{% macro foo() %}42{% endmacro %}23') >>> str(t.module) '23' >>> t.module.foo() == u'42' True This attribute is not available if async mode is enabled. """ return self._get_default_module() def get_corresponding_lineno(self, lineno): """Return the source line number of a line number in the generated bytecode as they are not in sync. """ for template_line, code_line in reversed(self.debug_info): if code_line <= lineno: return template_line return 1 @property def is_up_to_date(self): """If this variable is `False` there is a newer version available.""" if self._uptodate is None: return True return self._uptodate() @property def debug_info(self): """The debug info mapping.""" if self._debug_info: return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")] return [] def __repr__(self): if self.name is None: name = "memory:%x" % id(self) else: name = repr(self.name) return "<%s %s>" % (self.__class__.__name__, name) @implements_to_string class TemplateModule(object): """Represents an imported template. All the exported names of the template are available as attributes on this object. Additionally converting it into an unicode- or bytestrings renders the contents. """ def __init__(self, template, context, body_stream=None): if body_stream is None: if context.environment.is_async: raise RuntimeError( "Async mode requires a body stream " "to be passed to a template module. Use " "the async methods of the API you are " "using." ) body_stream = list(template.root_render_func(context)) self._body_stream = body_stream self.__dict__.update(context.get_exported()) self.__name__ = template.name def __html__(self): return Markup(concat(self._body_stream)) def __str__(self): return concat(self._body_stream) def __repr__(self): if self.__name__ is None: name = "memory:%x" % id(self) else: name = repr(self.__name__) return "<%s %s>" % (self.__class__.__name__, name) class TemplateExpression(object): """The :meth:`jinja2.Environment.compile_expression` method returns an instance of this object. It encapsulates the expression-like access to the template with an expression it wraps. """ def __init__(self, template, undefined_to_none): self._template = template self._undefined_to_none = undefined_to_none def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) rv = context.vars["result"] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv @implements_iterator class TemplateStream(object): """A template stream works pretty much like an ordinary python generator but it can buffer multiple items to reduce the number of total iterations. Per default the output is unbuffered which means that for every unbuffered instruction in the template one unicode string is yielded. If buffering is enabled with a buffer size of 5, five items are combined into a new unicode string. This is mainly useful if you are streaming big templates to a client via WSGI which flushes after each iteration. """ def __init__(self, gen): self._gen = gen self.disable_buffering() def dump(self, fp, encoding=None, errors="strict"): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specify an `encoding`. 
Example usage:: Template('Hello {{ name }}!').stream(name='foo').dump('hello.html') """ close = False if isinstance(fp, string_types): if encoding is None: encoding = "utf-8" fp = open(fp, "wb") close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self if hasattr(fp, "writelines"): fp.writelines(iterable) else: for item in iterable: fp.write(item) finally: if close: fp.close() def disable_buffering(self): """Disable the output buffering.""" self._next = partial(next, self._gen) self.buffered = False def _buffered_generator(self, size): buf = [] c_size = 0 push = buf.append while 1: try: while c_size < size: c = next(self._gen) push(c) if c: c_size += 1 except StopIteration: if not c_size: return yield concat(buf) del buf[:] c_size = 0 def enable_buffering(self, size=5): """Enable buffering. Buffer `size` items before yielding them.""" if size <= 1: raise ValueError("buffer size too small") self.buffered = True self._next = partial(next, self._buffered_generator(size)) def __iter__(self): return self def __next__(self): return self._next() # hook in default template class. if anyone reads this comment: ignore that # it's possible to use custom templates ;-) Environment.template_class = Template
0
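The Template methods documented above (render, generate, stream, and the module attribute) can be exercised with a short self-contained sketch; the template text and names below are illustrative, not part of the source.

from jinja2 import Template

t = Template("{% macro shout(s) %}{{ s|upper }}{% endmacro %}Hello {{ name }}!")

# render() accepts keyword arguments or a single dict and returns one string.
print(t.render(name="World"))               # Hello World!
print(t.render({"name": "World"}))          # same result

# generate() yields the output piece by piece; stream() wraps it in a
# TemplateStream whose buffering can be tuned.
print("".join(t.generate(name="World")))
stream = t.stream(name="World")
stream.enable_buffering(size=5)
print("".join(stream))

# The module attribute exposes exported names such as macros.
print(t.module.shout("hi"))                 # HI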
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/_identifier.py
import re # generated by scripts/generate_identifier_pattern.py pattern = re.compile( r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 )
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/optimizer.py
# -*- coding: utf-8 -*-
"""The optimizer tries to constant fold expressions and modify the AST
in place so that it should be faster to evaluate.

Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example, loop unrolling doesn't work because unrolled loops
would have a different scope. The solution would be a second syntax tree
that stored the scoping rules.
"""
from . import nodes
from .visitor import NodeTransformer


def optimize(node, environment):
    """The context hint can be used to perform a static optimization
    based on the context given."""
    optimizer = Optimizer(environment)
    return optimizer.visit(node)


class Optimizer(NodeTransformer):
    def __init__(self, environment):
        self.environment = environment

    def generic_visit(self, node, *args, **kwargs):
        node = super(Optimizer, self).generic_visit(node, *args, **kwargs)

        # Do constant folding. Some other nodes besides Expr have
        # as_const, but folding them causes errors later on.
        if isinstance(node, nodes.Expr):
            try:
                return nodes.Const.from_untrusted(
                    node.as_const(args[0] if args else None),
                    lineno=node.lineno,
                    environment=self.environment,
                )
            except nodes.Impossible:
                pass

        return node
0
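A minimal sketch (not part of the file above) of the optimizer folding a constant expression in a parsed template:

from jinja2 import Environment
from jinja2.optimizer import optimize

env = Environment()
ast = env.parse("{{ 1 + 2 * 3 }}")

# generic_visit() folds the Add/Mul expression into a single Const node,
# so the repr is roughly Template(body=[Output(nodes=[Const(value=7)])]).
folded = optimize(ast, env)
print(folded)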
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/exceptions.py
# -*- coding: utf-8 -*- from ._compat import imap from ._compat import implements_to_string from ._compat import PY2 from ._compat import text_type class TemplateError(Exception): """Baseclass for all template errors.""" if PY2: def __init__(self, message=None): if message is not None: message = text_type(message).encode("utf-8") Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message.decode("utf-8", "replace") def __unicode__(self): return self.message or u"" else: def __init__(self, message=None): Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message @implements_to_string class TemplateNotFound(IOError, LookupError, TemplateError): """Raised if a template does not exist. .. versionchanged:: 2.11 If the given name is :class:`Undefined` and no message was provided, an :exc:`UndefinedError` is raised. """ # looks weird, but removes the warning descriptor that just # bogusly warns us about message being deprecated message = None def __init__(self, name, message=None): IOError.__init__(self, name) if message is None: from .runtime import Undefined if isinstance(name, Undefined): name._fail_with_undefined_error() message = name self.message = message self.name = name self.templates = [name] def __str__(self): return self.message class TemplatesNotFound(TemplateNotFound): """Like :class:`TemplateNotFound` but raised if multiple templates are selected. This is a subclass of :class:`TemplateNotFound` exception, so just catching the base exception will catch both. .. versionchanged:: 2.11 If a name in the list of names is :class:`Undefined`, a message about it being undefined is shown rather than the empty string. .. versionadded:: 2.2 """ def __init__(self, names=(), message=None): if message is None: from .runtime import Undefined parts = [] for name in names: if isinstance(name, Undefined): parts.append(name._undefined_message) else: parts.append(name) message = u"none of the templates given were found: " + u", ".join( imap(text_type, parts) ) TemplateNotFound.__init__(self, names and names[-1] or None, message) self.templates = list(names) @implements_to_string class TemplateSyntaxError(TemplateError): """Raised to tell the user that there is a problem with the template.""" def __init__(self, message, lineno, name=None, filename=None): TemplateError.__init__(self, message) self.lineno = lineno self.name = name self.filename = filename self.source = None # this is set to True if the debug.translate_syntax_error # function translated the syntax error into a new traceback self.translated = False def __str__(self): # for translated errors we only return the message if self.translated: return self.message # otherwise attach some stuff location = "line %d" % self.lineno name = self.filename or self.name if name: location = 'File "%s", %s' % (name, location) lines = [self.message, " " + location] # if the source is set, add the line to the output if self.source is not None: try: line = self.source.splitlines()[self.lineno - 1] except IndexError: line = None if line: lines.append(" " + line.strip()) return u"\n".join(lines) def __reduce__(self): # https://bugs.python.org/issue1692335 Exceptions that take # multiple required arguments have problems with pickling. 
# Without this, raises TypeError: __init__() missing 1 required # positional argument: 'lineno' return self.__class__, (self.message, self.lineno, self.name, self.filename) class TemplateAssertionError(TemplateSyntaxError): """Like a template syntax error, but covers cases where something in the template caused an error at compile time that wasn't necessarily caused by a syntax error. However it's a direct subclass of :exc:`TemplateSyntaxError` and has the same attributes. """ class TemplateRuntimeError(TemplateError): """A generic runtime error in the template engine. Under some situations Jinja may raise this exception. """ class UndefinedError(TemplateRuntimeError): """Raised if a template tries to operate on :class:`Undefined`.""" class SecurityError(TemplateRuntimeError): """Raised if a template tries to do something insecure if the sandbox is enabled. """ class FilterArgumentError(TemplateRuntimeError): """This error is raised if a filter was called with inappropriate arguments """
0
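A brief, illustrative example of how the exception classes above surface in application code; the broken template is made up:

from jinja2 import Environment, TemplateSyntaxError

env = Environment()
try:
    env.from_string("{% if %}")   # missing condition
except TemplateSyntaxError as e:
    # message, lineno, name and filename are the attributes set above.
    print(e.message, e.lineno)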
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/defaults.py
# -*- coding: utf-8 -*-
from ._compat import range_type
from .filters import FILTERS as DEFAULT_FILTERS  # noqa: F401
from .tests import TESTS as DEFAULT_TESTS  # noqa: F401
from .utils import Cycler
from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace

# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace
DEFAULT_NAMESPACE = {
    "range": range_type,
    "dict": dict,
    "lipsum": generate_lorem_ipsum,
    "cycler": Cycler,
    "joiner": Joiner,
    "namespace": Namespace,
}

# default policies
DEFAULT_POLICIES = {
    "compiler.ascii_str": True,
    "urlize.rel": "noopener",
    "urlize.target": None,
    "truncate.leeway": 5,
    "json.dumps_function": None,
    "json.dumps_kwargs": {"sort_keys": True},
    "ext.i18n.trimmed": False,
}
0
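The constants above are the defaults picked up by Environment; each can be overridden per instance. A small sketch (the delimiters are chosen arbitrarily):

from jinja2 import Environment

env = Environment(
    block_start_string="<%",
    block_end_string="%>",
    variable_start_string="${",
    variable_end_string="}",
)
# "range" comes from DEFAULT_NAMESPACE, so it is available as a global.
print(env.from_string("<% for i in range(3) %>${ i }<% endfor %>").render())
# -> 012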
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/nativetypes.py
from ast import literal_eval from itertools import chain from itertools import islice from . import nodes from ._compat import text_type from .compiler import CodeGenerator from .compiler import has_safe_repr from .environment import Environment from .environment import Template def native_concat(nodes): """Return a native Python type from the list of compiled nodes. If the result is a single node, its value is returned. Otherwise, the nodes are concatenated as strings. If the result can be parsed with :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the string is returned. :param nodes: Iterable of nodes to concatenate. """ head = list(islice(nodes, 2)) if not head: return None if len(head) == 1: raw = head[0] else: raw = u"".join([text_type(v) for v in chain(head, nodes)]) try: return literal_eval(raw) except (ValueError, SyntaxError, MemoryError): return raw class NativeCodeGenerator(CodeGenerator): """A code generator which renders Python types by not adding ``to_string()`` around output nodes. """ @staticmethod def _default_finalize(value): return value def _output_const_repr(self, group): return repr(u"".join([text_type(v) for v in group])) def _output_child_to_const(self, node, frame, finalize): const = node.as_const(frame.eval_ctx) if not has_safe_repr(const): raise nodes.Impossible() if isinstance(node, nodes.TemplateData): return const return finalize.const(const) def _output_child_pre(self, node, frame, finalize): if finalize.src is not None: self.write(finalize.src) def _output_child_post(self, node, frame, finalize): if finalize.src is not None: self.write(")") class NativeEnvironment(Environment): """An environment that renders templates to native Python types.""" code_generator_class = NativeCodeGenerator class NativeTemplate(Template): environment_class = NativeEnvironment def render(self, *args, **kwargs): """Render the template to produce a native Python type. If the result is a single node, its value is returned. Otherwise, the nodes are concatenated as strings. If the result can be parsed with :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the string is returned. """ vars = dict(*args, **kwargs) try: return native_concat(self.root_render_func(self.new_context(vars))) except Exception: return self.environment.handle_exception() NativeEnvironment.template_class = NativeTemplate
0
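An illustrative use of NativeEnvironment, which returns native Python types instead of strings (the values below are made up):

from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment()
result = env.from_string("{{ x + y }}").render(x=4, y=2)
# With a single output node, native_concat() returns the value as-is,
# so the result is an int rather than the string "6".
print(type(result).__name__, result)   # int 6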
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/_compat.py
# -*- coding: utf-8 -*- # flake8: noqa import marshal import sys PY2 = sys.version_info[0] == 2 PYPY = hasattr(sys, "pypy_translation_info") _identity = lambda x: x if not PY2: unichr = chr range_type = range text_type = str string_types = (str,) integer_types = (int,) iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) import pickle from io import BytesIO, StringIO NativeStringIO = StringIO def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value ifilter = filter imap = map izip = zip intern = sys.intern implements_iterator = _identity implements_to_string = _identity encode_filename = _identity marshal_dump = marshal.dump marshal_load = marshal.load else: unichr = unichr text_type = unicode range_type = xrange string_types = (str, unicode) integer_types = (int, long) iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() import cPickle as pickle from cStringIO import StringIO as BytesIO, StringIO NativeStringIO = BytesIO exec("def reraise(tp, value, tb=None):\n raise tp, value, tb") from itertools import imap, izip, ifilter intern = intern def implements_iterator(cls): cls.next = cls.__next__ del cls.__next__ return cls def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode("utf-8") return cls def encode_filename(filename): if isinstance(filename, unicode): return filename.encode("utf-8") return filename def marshal_dump(code, f): if isinstance(f, file): marshal.dump(code, f) else: f.write(marshal.dumps(code)) def marshal_load(f): if isinstance(f, file): return marshal.load(f) return marshal.loads(f.read()) def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, "temporary_class", (), {}) try: from urllib.parse import quote_from_bytes as url_quote except ImportError: from urllib import quote as url_quote try: from collections import abc except ImportError: import collections as abc try: from os import fspath except ImportError: try: from pathlib import PurePath except ImportError: PurePath = None def fspath(path): if hasattr(path, "__fspath__"): return path.__fspath__() # Python 3.5 doesn't have __fspath__ yet, use str. if PurePath is not None and isinstance(path, PurePath): return str(path) return path
0
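A tiny sketch of the with_metaclass() helper above, which lets a class declare a metaclass in a way that works on both Python 2 and 3:

from jinja2._compat import with_metaclass

class Meta(type):
    def __new__(mcs, name, bases, d):
        d.setdefault("tag", name.lower())
        return type.__new__(mcs, name, bases, d)

# The temporary metaclass replaces itself with Meta at class creation.
class Base(with_metaclass(Meta, object)):
    pass

print(Base.tag)   # base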
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/nodes.py
# -*- coding: utf-8 -*- """AST nodes generated by the parser for the compiler. Also provides some node tree helper functions used by the parser and compiler in order to normalize nodes. """ import operator from collections import deque from markupsafe import Markup from ._compat import izip from ._compat import PY2 from ._compat import text_type from ._compat import with_metaclass _binop_to_func = { "*": operator.mul, "/": operator.truediv, "//": operator.floordiv, "**": operator.pow, "%": operator.mod, "+": operator.add, "-": operator.sub, } _uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg} _cmpop_to_func = { "eq": operator.eq, "ne": operator.ne, "gt": operator.gt, "gteq": operator.ge, "lt": operator.lt, "lteq": operator.le, "in": lambda a, b: a in b, "notin": lambda a, b: a not in b, } class Impossible(Exception): """Raised if the node could not perform a requested action.""" class NodeType(type): """A metaclass for nodes that handles the field and attribute inheritance. fields and attributes from the parent class are automatically forwarded to the child.""" def __new__(mcs, name, bases, d): for attr in "fields", "attributes": storage = [] storage.extend(getattr(bases[0], attr, ())) storage.extend(d.get(attr, ())) assert len(bases) == 1, "multiple inheritance not allowed" assert len(storage) == len(set(storage)), "layout conflict" d[attr] = tuple(storage) d.setdefault("abstract", False) return type.__new__(mcs, name, bases, d) class EvalContext(object): """Holds evaluation time information. Custom attributes can be attached to it in extensions. """ def __init__(self, environment, template_name=None): self.environment = environment if callable(environment.autoescape): self.autoescape = environment.autoescape(template_name) else: self.autoescape = environment.autoescape self.volatile = False def save(self): return self.__dict__.copy() def revert(self, old): self.__dict__.clear() self.__dict__.update(old) def get_eval_context(node, ctx): if ctx is None: if node.environment is None: raise RuntimeError( "if no eval context is passed, the " "node must have an attached " "environment." ) return EvalContext(node.environment) return ctx class Node(with_metaclass(NodeType, object)): """Baseclass for all Jinja nodes. There are a number of nodes available of different types. There are four major types: - :class:`Stmt`: statements - :class:`Expr`: expressions - :class:`Helper`: helper nodes - :class:`Template`: the outermost wrapper node All nodes have fields and attributes. Fields may be other nodes, lists, or arbitrary values. Fields are passed to the constructor as regular positional arguments, attributes as keyword arguments. Each node has two attributes: `lineno` (the line number of the node) and `environment`. The `environment` attribute is set at the end of the parsing process for all nodes automatically. 
""" fields = () attributes = ("lineno", "environment") abstract = True def __init__(self, *fields, **attributes): if self.abstract: raise TypeError("abstract nodes are not instantiable") if fields: if len(fields) != len(self.fields): if not self.fields: raise TypeError("%r takes 0 arguments" % self.__class__.__name__) raise TypeError( "%r takes 0 or %d argument%s" % ( self.__class__.__name__, len(self.fields), len(self.fields) != 1 and "s" or "", ) ) for name, arg in izip(self.fields, fields): setattr(self, name, arg) for attr in self.attributes: setattr(self, attr, attributes.pop(attr, None)) if attributes: raise TypeError("unknown attribute %r" % next(iter(attributes))) def iter_fields(self, exclude=None, only=None): """This method iterates over all fields that are defined and yields ``(key, value)`` tuples. Per default all fields are returned, but it's possible to limit that to some fields by providing the `only` parameter or to exclude some using the `exclude` parameter. Both should be sets or tuples of field names. """ for name in self.fields: if ( (exclude is only is None) or (exclude is not None and name not in exclude) or (only is not None and name in only) ): try: yield name, getattr(self, name) except AttributeError: pass def iter_child_nodes(self, exclude=None, only=None): """Iterates over all direct child nodes of the node. This iterates over all fields and yields the values of they are nodes. If the value of a field is a list all the nodes in that list are returned. """ for _, item in self.iter_fields(exclude, only): if isinstance(item, list): for n in item: if isinstance(n, Node): yield n elif isinstance(item, Node): yield item def find(self, node_type): """Find the first node of a given type. If no such node exists the return value is `None`. """ for result in self.find_all(node_type): return result def find_all(self, node_type): """Find all the nodes of a given type. If the type is a tuple, the check is performed for any of the tuple items. """ for child in self.iter_child_nodes(): if isinstance(child, node_type): yield child for result in child.find_all(node_type): yield result def set_ctx(self, ctx): """Reset the context of a node and all child nodes. Per default the parser will all generate nodes that have a 'load' context as it's the most common one. This method is used in the parser to set assignment targets and other nodes to a store context. 
""" todo = deque([self]) while todo: node = todo.popleft() if "ctx" in node.fields: node.ctx = ctx todo.extend(node.iter_child_nodes()) return self def set_lineno(self, lineno, override=False): """Set the line numbers of the node and children.""" todo = deque([self]) while todo: node = todo.popleft() if "lineno" in node.attributes: if node.lineno is None or override: node.lineno = lineno todo.extend(node.iter_child_nodes()) return self def set_environment(self, environment): """Set the environment for all nodes.""" todo = deque([self]) while todo: node = todo.popleft() node.environment = environment todo.extend(node.iter_child_nodes()) return self def __eq__(self, other): return type(self) is type(other) and tuple(self.iter_fields()) == tuple( other.iter_fields() ) def __ne__(self, other): return not self.__eq__(other) # Restore Python 2 hashing behavior on Python 3 __hash__ = object.__hash__ def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields), ) def dump(self): def _dump(node): if not isinstance(node, Node): buf.append(repr(node)) return buf.append("nodes.%s(" % node.__class__.__name__) if not node.fields: buf.append(")") return for idx, field in enumerate(node.fields): if idx: buf.append(", ") value = getattr(node, field) if isinstance(value, list): buf.append("[") for idx, item in enumerate(value): if idx: buf.append(", ") _dump(item) buf.append("]") else: _dump(value) buf.append(")") buf = [] _dump(self) return "".join(buf) class Stmt(Node): """Base node for all statements.""" abstract = True class Helper(Node): """Nodes that exist in a specific context only.""" abstract = True class Template(Node): """Node that represents a template. This must be the outermost node that is passed to the compiler. """ fields = ("body",) class Output(Stmt): """A node that holds multiple expressions which are then printed out. This is used both for the `print` statement and the regular template data. """ fields = ("nodes",) class Extends(Stmt): """Represents an extends statement.""" fields = ("template",) class For(Stmt): """The for loop. `target` is the target for the iteration (usually a :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list of nodes that are used as loop-body, and `else_` a list of nodes for the `else` block. If no else node exists it has to be an empty list. For filtered nodes an expression can be stored as `test`, otherwise `None`. """ fields = ("target", "iter", "body", "else_", "test", "recursive") class If(Stmt): """If `test` is true, `body` is rendered, else `else_`.""" fields = ("test", "body", "elif_", "else_") class Macro(Stmt): """A macro definition. `name` is the name of the macro, `args` a list of arguments and `defaults` a list of defaults if there are any. `body` is a list of nodes for the macro body. """ fields = ("name", "args", "defaults", "body") class CallBlock(Stmt): """Like a macro without a name but a call instead. `call` is called with the unnamed macro as `caller` argument this node holds. """ fields = ("call", "args", "defaults", "body") class FilterBlock(Stmt): """Node for filter sections.""" fields = ("body", "filter") class With(Stmt): """Specific node for with statements. In older versions of Jinja the with statement was implemented on the base of the `Scope` node instead. .. 
versionadded:: 2.9.3 """ fields = ("targets", "values", "body") class Block(Stmt): """A node that represents a block.""" fields = ("name", "body", "scoped") class Include(Stmt): """A node that represents the include tag.""" fields = ("template", "with_context", "ignore_missing") class Import(Stmt): """A node that represents the import tag.""" fields = ("template", "target", "with_context") class FromImport(Stmt): """A node that represents the from import tag. It's important to not pass unsafe names to the name attribute. The compiler translates the attribute lookups directly into getattr calls and does *not* use the subscript callback of the interface. As exported variables may not start with double underscores (which the parser asserts) this is not a problem for regular Jinja code, but if this node is used in an extension extra care must be taken. The list of names may contain tuples if aliases are wanted. """ fields = ("template", "names", "with_context") class ExprStmt(Stmt): """A statement that evaluates an expression and discards the result.""" fields = ("node",) class Assign(Stmt): """Assigns an expression to a target.""" fields = ("target", "node") class AssignBlock(Stmt): """Assigns a block to a target.""" fields = ("target", "filter", "body") class Expr(Node): """Baseclass for all expressions.""" abstract = True def as_const(self, eval_ctx=None): """Return the value of the expression as constant or raise :exc:`Impossible` if this was not possible. An :class:`EvalContext` can be provided, if none is given a default context is created which requires the nodes to have an attached environment. .. versionchanged:: 2.4 the `eval_ctx` parameter was added. """ raise Impossible() def can_assign(self): """Check if it's possible to assign something to this node.""" return False class BinExpr(Expr): """Baseclass for all binary expressions.""" fields = ("left", "right") operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) # intercepted operators cannot be folded at compile time if ( self.environment.sandboxed and self.operator in self.environment.intercepted_binops ): raise Impossible() f = _binop_to_func[self.operator] try: return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx)) except Exception: raise Impossible() class UnaryExpr(Expr): """Baseclass for all unary expressions.""" fields = ("node",) operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) # intercepted operators cannot be folded at compile time if ( self.environment.sandboxed and self.operator in self.environment.intercepted_unops ): raise Impossible() f = _uaop_to_func[self.operator] try: return f(self.node.as_const(eval_ctx)) except Exception: raise Impossible() class Name(Expr): """Looks up a name or stores a value in a name. The `ctx` of the node can be one of the following values: - `store`: store a value in the name - `load`: load that name - `param`: like `store` but if the name was defined as function parameter. """ fields = ("name", "ctx") def can_assign(self): return self.name not in ("true", "false", "none", "True", "False", "None") class NSRef(Expr): """Reference to a namespace value assignment""" fields = ("name", "attr") def can_assign(self): # We don't need any special checks here; NSRef assignments have a # runtime check to ensure the target is a namespace object which will # have been checked already as it is created using a normal assignment # which goes through a `Name` node. 
return True class Literal(Expr): """Baseclass for literals.""" abstract = True class Const(Literal): """All constant values. The parser will return this node for simple constants such as ``42`` or ``"foo"`` but it can be used to store more complex values such as lists too. Only constants with a safe representation (objects where ``eval(repr(x)) == x`` is true). """ fields = ("value",) def as_const(self, eval_ctx=None): rv = self.value if ( PY2 and type(rv) is text_type and self.environment.policies["compiler.ascii_str"] ): try: rv = rv.encode("ascii") except UnicodeError: pass return rv @classmethod def from_untrusted(cls, value, lineno=None, environment=None): """Return a const object if the value is representable as constant value in the generated code, otherwise it will raise an `Impossible` exception. """ from .compiler import has_safe_repr if not has_safe_repr(value): raise Impossible() return cls(value, lineno=lineno, environment=environment) class TemplateData(Literal): """A constant template string.""" fields = ("data",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if eval_ctx.volatile: raise Impossible() if eval_ctx.autoescape: return Markup(self.data) return self.data class Tuple(Literal): """For loop unpacking and some other things like multiple arguments for subscripts. Like for :class:`Name` `ctx` specifies if the tuple is used for loading the names or storing. """ fields = ("items", "ctx") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return tuple(x.as_const(eval_ctx) for x in self.items) def can_assign(self): for item in self.items: if not item.can_assign(): return False return True class List(Literal): """Any list literal such as ``[1, 2, 3]``""" fields = ("items",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return [x.as_const(eval_ctx) for x in self.items] class Dict(Literal): """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of :class:`Pair` nodes. """ fields = ("items",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return dict(x.as_const(eval_ctx) for x in self.items) class Pair(Helper): """A key, value pair for dicts.""" fields = ("key", "value") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx) class Keyword(Helper): """A key, value pair for keyword arguments where key is a string.""" fields = ("key", "value") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.key, self.value.as_const(eval_ctx) class CondExpr(Expr): """A conditional expression (inline if expression). 
(``{{ foo if bar else baz }}``) """ fields = ("test", "expr1", "expr2") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if self.test.as_const(eval_ctx): return self.expr1.as_const(eval_ctx) # if we evaluate to an undefined object, we better do that at runtime if self.expr2 is None: raise Impossible() return self.expr2.as_const(eval_ctx) def args_as_const(node, eval_ctx): args = [x.as_const(eval_ctx) for x in node.args] kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs) if node.dyn_args is not None: try: args.extend(node.dyn_args.as_const(eval_ctx)) except Exception: raise Impossible() if node.dyn_kwargs is not None: try: kwargs.update(node.dyn_kwargs.as_const(eval_ctx)) except Exception: raise Impossible() return args, kwargs class Filter(Expr): """This node applies a filter on an expression. `name` is the name of the filter, the rest of the fields are the same as for :class:`Call`. If the `node` of a filter is `None` the contents of the last buffer are filtered. Buffers are created by macros and filter blocks. """ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if eval_ctx.volatile or self.node is None: raise Impossible() # we have to be careful here because we call filter_ below. # if this variable would be called filter, 2to3 would wrap the # call in a list because it is assuming we are talking about the # builtin filter function here which no longer returns a list in # python 3. because of that, do not rename filter_ to filter! filter_ = self.environment.filters.get(self.name) if filter_ is None or getattr(filter_, "contextfilter", False) is True: raise Impossible() # We cannot constant handle async filters, so we need to make sure # to not go down this path. if eval_ctx.environment.is_async and getattr( filter_, "asyncfiltervariant", False ): raise Impossible() args, kwargs = args_as_const(self, eval_ctx) args.insert(0, self.node.as_const(eval_ctx)) if getattr(filter_, "evalcontextfilter", False) is True: args.insert(0, eval_ctx) elif getattr(filter_, "environmentfilter", False) is True: args.insert(0, self.environment) try: return filter_(*args, **kwargs) except Exception: raise Impossible() class Test(Expr): """Applies a test on an expression. `name` is the name of the test, the rest of the fields are the same as for :class:`Call`. """ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs") def as_const(self, eval_ctx=None): test = self.environment.tests.get(self.name) if test is None: raise Impossible() eval_ctx = get_eval_context(self, eval_ctx) args, kwargs = args_as_const(self, eval_ctx) args.insert(0, self.node.as_const(eval_ctx)) try: return test(*args, **kwargs) except Exception: raise Impossible() class Call(Expr): """Calls an expression. `args` is a list of arguments, `kwargs` a list of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args` and `dyn_kwargs` has to be either `None` or a node that is used as node for dynamic positional (``*args``) or keyword (``**kwargs``) arguments. 
""" fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs") class Getitem(Expr): """Get an attribute or item from an expression and prefer the item.""" fields = ("node", "arg", "ctx") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if self.ctx != "load": raise Impossible() try: return self.environment.getitem( self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx) ) except Exception: raise Impossible() def can_assign(self): return False class Getattr(Expr): """Get an attribute or item from an expression that is a ascii-only bytestring and prefer the attribute. """ fields = ("node", "attr", "ctx") def as_const(self, eval_ctx=None): if self.ctx != "load": raise Impossible() try: eval_ctx = get_eval_context(self, eval_ctx) return self.environment.getattr(self.node.as_const(eval_ctx), self.attr) except Exception: raise Impossible() def can_assign(self): return False class Slice(Expr): """Represents a slice object. This must only be used as argument for :class:`Subscript`. """ fields = ("start", "stop", "step") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) def const(obj): if obj is None: return None return obj.as_const(eval_ctx) return slice(const(self.start), const(self.stop), const(self.step)) class Concat(Expr): """Concatenates the list of expressions provided after converting them to unicode. """ fields = ("nodes",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes) class Compare(Expr): """Compares an expression with some other expressions. `ops` must be a list of :class:`Operand`\\s. """ fields = ("expr", "ops") def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) result = value = self.expr.as_const(eval_ctx) try: for op in self.ops: new_value = op.expr.as_const(eval_ctx) result = _cmpop_to_func[op.op](value, new_value) if not result: return False value = new_value except Exception: raise Impossible() return result class Operand(Helper): """Holds an operator and an expression.""" fields = ("op", "expr") if __debug__: Operand.__doc__ += "\nThe following operators are available: " + ", ".join( sorted( "``%s``" % x for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func) ) ) class Mul(BinExpr): """Multiplies the left with the right node.""" operator = "*" class Div(BinExpr): """Divides the left by the right node.""" operator = "/" class FloorDiv(BinExpr): """Divides the left by the right node and truncates conver the result into an integer by truncating. 
""" operator = "//" class Add(BinExpr): """Add the left to the right node.""" operator = "+" class Sub(BinExpr): """Subtract the right from the left node.""" operator = "-" class Mod(BinExpr): """Left modulo right.""" operator = "%" class Pow(BinExpr): """Left to the power of right.""" operator = "**" class And(BinExpr): """Short circuited AND.""" operator = "and" def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx) class Or(BinExpr): """Short circuited OR.""" operator = "or" def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx) class Not(UnaryExpr): """Negate the expression.""" operator = "not" class Neg(UnaryExpr): """Make the expression negative.""" operator = "-" class Pos(UnaryExpr): """Make the expression positive (noop for most expressions)""" operator = "+" # Helpers for extensions class EnvironmentAttribute(Expr): """Loads an attribute from the environment object. This is useful for extensions that want to call a callback stored on the environment. """ fields = ("name",) class ExtensionAttribute(Expr): """Returns the attribute of an extension bound to the environment. The identifier is the identifier of the :class:`Extension`. This node is usually constructed by calling the :meth:`~jinja2.ext.Extension.attr` method on an extension. """ fields = ("identifier", "name") class ImportedName(Expr): """If created with an import name the import name is returned on node access. For example ``ImportedName('cgi.escape')`` returns the `escape` function from the cgi module on evaluation. Imports are optimized by the compiler so there is no need to assign them to local variables. """ fields = ("importname",) class InternalName(Expr): """An internal name in the compiler. You cannot create these nodes yourself but the parser provides a :meth:`~jinja2.parser.Parser.free_identifier` method that creates a new identifier for you. This identifier is not available from the template and is not threated specially by the compiler. """ fields = ("name",) def __init__(self): raise TypeError( "Can't create internal names. Use the " "`free_identifier` method on a parser." ) class MarkSafe(Expr): """Mark the wrapped expression as safe (wrap it as `Markup`).""" fields = ("expr",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return Markup(self.expr.as_const(eval_ctx)) class MarkSafeIfAutoescape(Expr): """Mark the wrapped expression as safe (wrap it as `Markup`) but only if autoescaping is active. .. versionadded:: 2.5 """ fields = ("expr",) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if eval_ctx.volatile: raise Impossible() expr = self.expr.as_const(eval_ctx) if eval_ctx.autoescape: return Markup(expr) return expr class ContextReference(Expr): """Returns the current template context. It can be used like a :class:`Name` node, with a ``'load'`` ctx and will return the current :class:`~jinja2.runtime.Context` object. Here an example that assigns the current template name to a variable named `foo`:: Assign(Name('foo', ctx='store'), Getattr(ContextReference(), 'name')) This is basically equivalent to using the :func:`~jinja2.contextfunction` decorator when using the high-level API, which causes a reference to the context to be passed as the first argument to a function. 
""" class DerivedContextReference(Expr): """Return the current template context including locals. Behaves exactly like :class:`ContextReference`, but includes local variables, such as from a ``for`` loop. .. versionadded:: 2.11 """ class Continue(Stmt): """Continue a loop.""" class Break(Stmt): """Break a loop.""" class Scope(Stmt): """An artificial scope.""" fields = ("body",) class OverlayScope(Stmt): """An overlay scope for extensions. This is a largely unoptimized scope that however can be used to introduce completely arbitrary variables into a sub scope from a dictionary or dictionary like object. The `context` field has to evaluate to a dictionary object. Example usage:: OverlayScope(context=self.call_method('get_context'), body=[...]) .. versionadded:: 2.10 """ fields = ("context", "body") class EvalContextModifier(Stmt): """Modifies the eval context. For each option that should be modified, a :class:`Keyword` has to be added to the :attr:`options` list. Example to change the `autoescape` setting:: EvalContextModifier(options=[Keyword('autoescape', Const(True))]) """ fields = ("options",) class ScopedEvalContextModifier(EvalContextModifier): """Modifies the eval context and reverts it later. Works exactly like :class:`EvalContextModifier` but will only modify the :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`. """ fields = ("body",) # make sure nobody creates custom nodes def _failing_new(*args, **kwargs): raise TypeError("can't create custom node types") NodeType.__new__ = staticmethod(_failing_new) del _failing_new
0
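A hedged sketch of working with the node classes above: building a small expression tree by hand and constant-folding it with as_const() (the values are arbitrary):

from jinja2 import Environment
from jinja2 import nodes

env = Environment()
expr = nodes.Add(nodes.Const(40), nodes.Mul(nodes.Const(1), nodes.Const(2)), lineno=1)
expr.set_environment(env)

# BinExpr.as_const() looks up operator.add / operator.mul and folds.
print(expr.as_const())        # 42
print(expr.find(nodes.Mul))   # Mul(left=Const(value=1), right=Const(value=2))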
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/tests.py
# -*- coding: utf-8 -*- """Built-in template tests used with the ``is`` operator.""" import decimal import operator import re from ._compat import abc from ._compat import integer_types from ._compat import string_types from ._compat import text_type from .runtime import Undefined number_re = re.compile(r"^-?\d+(\.\d+)?$") regex_type = type(number_re) test_callable = callable def test_odd(value): """Return true if the variable is odd.""" return value % 2 == 1 def test_even(value): """Return true if the variable is even.""" return value % 2 == 0 def test_divisibleby(value, num): """Check if a variable is divisible by a number.""" return value % num == 0 def test_defined(value): """Return true if the variable is defined: .. sourcecode:: jinja {% if variable is defined %} value of variable: {{ variable }} {% else %} variable is not defined {% endif %} See the :func:`default` filter for a simple way to set undefined variables. """ return not isinstance(value, Undefined) def test_undefined(value): """Like :func:`defined` but the other way round.""" return isinstance(value, Undefined) def test_none(value): """Return true if the variable is none.""" return value is None def test_boolean(value): """Return true if the object is a boolean value. .. versionadded:: 2.11 """ return value is True or value is False def test_false(value): """Return true if the object is False. .. versionadded:: 2.11 """ return value is False def test_true(value): """Return true if the object is True. .. versionadded:: 2.11 """ return value is True # NOTE: The existing 'number' test matches booleans and floats def test_integer(value): """Return true if the object is an integer. .. versionadded:: 2.11 """ return isinstance(value, integer_types) and value is not True and value is not False # NOTE: The existing 'number' test matches booleans and integers def test_float(value): """Return true if the object is a float. .. versionadded:: 2.11 """ return isinstance(value, float) def test_lower(value): """Return true if the variable is lowercased.""" return text_type(value).islower() def test_upper(value): """Return true if the variable is uppercased.""" return text_type(value).isupper() def test_string(value): """Return true if the object is a string.""" return isinstance(value, string_types) def test_mapping(value): """Return true if the object is a mapping (dict etc.). .. versionadded:: 2.6 """ return isinstance(value, abc.Mapping) def test_number(value): """Return true if the variable is a number.""" return isinstance(value, integer_types + (float, complex, decimal.Decimal)) def test_sequence(value): """Return true if the variable is a sequence. Sequences are variables that are iterable. """ try: len(value) value.__getitem__ except Exception: return False return True def test_sameas(value, other): """Check if an object points to the same memory address than another object: .. sourcecode:: jinja {% if foo.attribute is sameas false %} the foo attribute really is the `False` singleton {% endif %} """ return value is other def test_iterable(value): """Check if it's possible to iterate over an object.""" try: iter(value) except TypeError: return False return True def test_escaped(value): """Check if the value is escaped.""" return hasattr(value, "__html__") def test_in(value, seq): """Check if value is in seq. .. 
versionadded:: 2.10 """ return value in seq TESTS = { "odd": test_odd, "even": test_even, "divisibleby": test_divisibleby, "defined": test_defined, "undefined": test_undefined, "none": test_none, "boolean": test_boolean, "false": test_false, "true": test_true, "integer": test_integer, "float": test_float, "lower": test_lower, "upper": test_upper, "string": test_string, "mapping": test_mapping, "number": test_number, "sequence": test_sequence, "iterable": test_iterable, "callable": test_callable, "sameas": test_sameas, "escaped": test_escaped, "in": test_in, "==": operator.eq, "eq": operator.eq, "equalto": operator.eq, "!=": operator.ne, "ne": operator.ne, ">": operator.gt, "gt": operator.gt, "greaterthan": operator.gt, "ge": operator.ge, ">=": operator.ge, "<": operator.lt, "lt": operator.lt, "lessthan": operator.lt, "<=": operator.le, "le": operator.le, }
0
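The tests above are used with the ``is`` operator inside templates; a short illustrative render:

from jinja2 import Environment

env = Environment()
out = env.from_string(
    "{{ 3 is odd }} {{ x is defined }} {{ 10 is divisibleby 5 }}"
).render()
print(out)   # True False True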
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/bccache.py
# -*- coding: utf-8 -*- """The optional bytecode cache system. This is useful if you have very complex template situations and the compilation of all those templates slows down your application too much. Situations where this is useful are often forking web applications that are initialized on the first request. """ import errno import fnmatch import os import stat import sys import tempfile from hashlib import sha1 from os import listdir from os import path from ._compat import BytesIO from ._compat import marshal_dump from ._compat import marshal_load from ._compat import pickle from ._compat import text_type from .utils import open_if_exists bc_version = 4 # Magic bytes to identify Jinja bytecode cache files. Contains the # Python major and minor version to avoid loading incompatible bytecode # if a project upgrades its Python version. bc_magic = ( b"j2" + pickle.dumps(bc_version, 2) + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) ) class Bucket(object): """Buckets are used to store the bytecode for one template. It's created and initialized by the bytecode cache and passed to the loading functions. The buckets get an internal checksum from the cache assigned and use this to automatically reject outdated cache material. Individual bytecode cache subclasses don't have to care about cache invalidation. """ def __init__(self, environment, key, checksum): self.environment = environment self.key = key self.checksum = checksum self.reset() def reset(self): """Resets the bucket (unloads the bytecode).""" self.code = None def load_bytecode(self, f): """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return # if marshal_load fails then we need to reload try: self.code = marshal_load(f) except (EOFError, ValueError, TypeError): self.reset() return def write_bytecode(self, f): """Dump the bytecode into the file or file like object passed.""" if self.code is None: raise TypeError("can't write empty bucket") f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal_dump(self.code, f) def bytecode_from_string(self, string): """Load bytecode from a string.""" self.load_bytecode(BytesIO(string)) def bytecode_to_string(self): """Return the bytecode as string.""" out = BytesIO() self.write_bytecode(out) return out.getvalue() class BytecodeCache(object): """To implement your own bytecode cache you have to subclass this class and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of these methods are passed a :class:`~jinja2.bccache.Bucket`. A very basic bytecode cache that saves the bytecode on the file system:: from os import path class MyCache(BytecodeCache): def __init__(self, directory): self.directory = directory def load_bytecode(self, bucket): filename = path.join(self.directory, bucket.key) if path.exists(filename): with open(filename, 'rb') as f: bucket.load_bytecode(f) def dump_bytecode(self, bucket): filename = path.join(self.directory, bucket.key) with open(filename, 'wb') as f: bucket.write_bytecode(f) A more advanced version of a filesystem based bytecode cache is part of Jinja. """ def load_bytecode(self, bucket): """Subclasses have to override this method to load bytecode into a bucket. If they are not able to find code in the cache for the bucket, it must not do anything. 
""" raise NotImplementedError() def dump_bytecode(self, bucket): """Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it unable to do so it must not fail silently but raise an exception. """ raise NotImplementedError() def clear(self): """Clears the cache. This method is not used by Jinja but should be implemented to allow applications to clear the bytecode cache used by a particular environment. """ def get_cache_key(self, name, filename=None): """Returns the unique hash key for this template name.""" hash = sha1(name.encode("utf-8")) if filename is not None: filename = "|" + filename if isinstance(filename, text_type): filename = filename.encode("utf-8") hash.update(filename) return hash.hexdigest() def get_source_checksum(self, source): """Returns a checksum for the source.""" return sha1(source.encode("utf-8")).hexdigest() def get_bucket(self, environment, name, filename, source): """Return a cache bucket for the given template. All arguments are mandatory but filename may be `None`. """ key = self.get_cache_key(name, filename) checksum = self.get_source_checksum(source) bucket = Bucket(environment, key, checksum) self.load_bytecode(bucket) return bucket def set_bucket(self, bucket): """Put the bucket into the cache.""" self.dump_bytecode(bucket) class FileSystemBytecodeCache(BytecodeCache): """A bytecode cache that stores bytecode on the filesystem. It accepts two arguments: The directory where the cache items are stored and a pattern string that is used to build the filename. If no directory is specified a default cache directory is selected. On Windows the user's temp directory is used, on UNIX systems a directory is created for the user in the system temp directory. The pattern can be used to have multiple separate caches operate on the same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` is replaced with the cache key. >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') This bytecode cache supports clearing of the cache using the clear method. """ def __init__(self, directory=None, pattern="__jinja2_%s.cache"): if directory is None: directory = self._get_default_cache_dir() self.directory = directory self.pattern = pattern def _get_default_cache_dir(self): def _unsafe_dir(): raise RuntimeError( "Cannot determine safe temp directory. You " "need to explicitly provide one." ) tmpdir = tempfile.gettempdir() # On windows the temporary directory is used specific unless # explicitly forced otherwise. We can just use that. 
if os.name == "nt": return tmpdir if not hasattr(os, "getuid"): _unsafe_dir() dirname = "_jinja2-cache-%d" % os.getuid() actual_dir = os.path.join(tmpdir, dirname) try: os.mkdir(actual_dir, stat.S_IRWXU) except OSError as e: if e.errno != errno.EEXIST: raise try: os.chmod(actual_dir, stat.S_IRWXU) actual_dir_stat = os.lstat(actual_dir) if ( actual_dir_stat.st_uid != os.getuid() or not stat.S_ISDIR(actual_dir_stat.st_mode) or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU ): _unsafe_dir() except OSError as e: if e.errno != errno.EEXIST: raise actual_dir_stat = os.lstat(actual_dir) if ( actual_dir_stat.st_uid != os.getuid() or not stat.S_ISDIR(actual_dir_stat.st_mode) or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU ): _unsafe_dir() return actual_dir def _get_cache_filename(self, bucket): return path.join(self.directory, self.pattern % bucket.key) def load_bytecode(self, bucket): f = open_if_exists(self._get_cache_filename(bucket), "rb") if f is not None: try: bucket.load_bytecode(f) finally: f.close() def dump_bytecode(self, bucket): f = open(self._get_cache_filename(bucket), "wb") try: bucket.write_bytecode(f) finally: f.close() def clear(self): # imported lazily here because google app-engine doesn't support # write access on the file system and the function does not exist # normally. from os import remove files = fnmatch.filter(listdir(self.directory), self.pattern % "*") for filename in files: try: remove(path.join(self.directory, filename)) except OSError: pass class MemcachedBytecodeCache(BytecodeCache): """This class implements a bytecode cache that uses a memcache cache for storing the information. It does not enforce a specific memcache library (tummy's memcache or cmemcache) but will accept any class that provides the minimal interface required. Libraries compatible with this class: - `cachelib <https://github.com/pallets/cachelib>`_ - `python-memcached <https://pypi.org/project/python-memcached/>`_ (Unfortunately the django cache interface is not compatible because it does not support storing binary data, only unicode. You can however pass the underlying cache client to the bytecode cache which is available as `django.core.cache.cache._client`.) The minimal interface for the client passed to the constructor is this: .. class:: MinimalClientInterface .. method:: set(key, value[, timeout]) Stores the bytecode in the cache. `value` is a string and `timeout` the timeout of the key. If timeout is not provided a default timeout or no timeout should be assumed, if it's provided it's an integer with the number of seconds the cache item should exist. .. method:: get(key) Returns the value for the cache key. If the item does not exist in the cache the return value must be `None`. The other arguments to the constructor are the prefix for all keys that is added before the actual cache key and the timeout for the bytecode in the cache system. We recommend a high (or no) timeout. This bytecode cache does not support clearing of used items in the cache. The clear method is a no-operation function. .. versionadded:: 2.7 Added support for ignoring memcache errors through the `ignore_memcache_errors` parameter. 
""" def __init__( self, client, prefix="jinja2/bytecode/", timeout=None, ignore_memcache_errors=True, ): self.client = client self.prefix = prefix self.timeout = timeout self.ignore_memcache_errors = ignore_memcache_errors def load_bytecode(self, bucket): try: code = self.client.get(self.prefix + bucket.key) except Exception: if not self.ignore_memcache_errors: raise code = None if code is not None: bucket.bytecode_from_string(code) def dump_bytecode(self, bucket): args = (self.prefix + bucket.key, bucket.bytecode_to_string()) if self.timeout is not None: args += (self.timeout,) try: self.client.set(*args) except Exception: if not self.ignore_memcache_errors: raise
0
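A sketch (the directory and template are made up) of plugging the FileSystemBytecodeCache above into an Environment; templates loaded through a loader are then compiled once and reused:

import tempfile

from jinja2 import DictLoader, Environment, FileSystemBytecodeCache

env = Environment(
    loader=DictLoader({"hello.txt": "Hello {{ name }}!"}),
    bytecode_cache=FileSystemBytecodeCache(tempfile.mkdtemp()),
)
# The first call compiles and writes a __jinja2_<key>.cache file; later
# processes pointed at the same directory load the bytecode instead.
print(env.get_template("hello.txt").render(name="cache"))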
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/filters.py
# -*- coding: utf-8 -*- """Built-in template filters used with the ``|`` operator.""" import math import random import re import warnings from collections import namedtuple from itertools import chain from itertools import groupby from markupsafe import escape from markupsafe import Markup from markupsafe import soft_unicode from ._compat import abc from ._compat import imap from ._compat import iteritems from ._compat import string_types from ._compat import text_type from .exceptions import FilterArgumentError from .runtime import Undefined from .utils import htmlsafe_json_dumps from .utils import pformat from .utils import unicode_urlencode from .utils import urlize _word_re = re.compile(r"\w+", re.UNICODE) _word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE) def contextfilter(f): """Decorator for marking context dependent filters. The current :class:`Context` will be passed as first argument. """ f.contextfilter = True return f def evalcontextfilter(f): """Decorator for marking eval-context dependent filters. An eval context object is passed as first argument. For more information about the eval context, see :ref:`eval-context`. .. versionadded:: 2.4 """ f.evalcontextfilter = True return f def environmentfilter(f): """Decorator for marking environment dependent filters. The current :class:`Environment` is passed to the filter as first argument. """ f.environmentfilter = True return f def ignore_case(value): """For use as a postprocessor for :func:`make_attrgetter`. Converts strings to lowercase and returns other types as-is.""" return value.lower() if isinstance(value, string_types) else value def make_attrgetter(environment, attribute, postprocess=None, default=None): """Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers. """ attribute = _prepare_attribute_parts(attribute) def attrgetter(item): for part in attribute: item = environment.getitem(item, part) if default and isinstance(item, Undefined): item = default if postprocess is not None: item = postprocess(item) return item return attrgetter def make_multi_attrgetter(environment, attribute, postprocess=None): """Returns a callable that looks up the given comma separated attributes from a passed object with the rules of the environment. Dots are allowed to access attributes of each attribute. Integer parts in paths are looked up as integers. The value returned by the returned callable is a list of extracted attribute values. Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc. """ attribute_parts = ( attribute.split(",") if isinstance(attribute, string_types) else [attribute] ) attribute = [ _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts ] def attrgetter(item): items = [None] * len(attribute) for i, attribute_part in enumerate(attribute): item_i = item for part in attribute_part: item_i = environment.getitem(item_i, part) if postprocess is not None: item_i = postprocess(item_i) items[i] = item_i return items return attrgetter def _prepare_attribute_parts(attr): if attr is None: return [] elif isinstance(attr, string_types): return [int(x) if x.isdigit() else x for x in attr.split(".")] else: return [attr] def do_forceescape(value): """Enforce HTML escaping. 
This will probably double escape variables.""" if hasattr(value, "__html__"): value = value.__html__() return escape(text_type(value)) def do_urlencode(value): """Quote data for use in a URL path or query using UTF-8. Basic wrapper around :func:`urllib.parse.quote` when given a string, or :func:`urllib.parse.urlencode` for a dict or iterable. :param value: Data to quote. A string will be quoted directly. A dict or iterable of ``(key, value)`` pairs will be joined as a query string. When given a string, "/" is not quoted. HTTP servers treat "/" and "%2F" equivalently in paths. If you need quoted slashes, use the ``|replace("/", "%2F")`` filter. .. versionadded:: 2.7 """ if isinstance(value, string_types) or not isinstance(value, abc.Iterable): return unicode_urlencode(value) if isinstance(value, dict): items = iteritems(value) else: items = iter(value) return u"&".join( "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True)) for k, v in items ) @evalcontextfilter def do_replace(eval_ctx, s, old, new, count=None): """Return a copy of the value with all occurrences of a substring replaced with a new one. The first argument is the substring that should be replaced, the second is the replacement string. If the optional third argument ``count`` is given, only the first ``count`` occurrences are replaced: .. sourcecode:: jinja {{ "Hello World"|replace("Hello", "Goodbye") }} -> Goodbye World {{ "aaaaargh"|replace("a", "d'oh, ", 2) }} -> d'oh, d'oh, aaargh """ if count is None: count = -1 if not eval_ctx.autoescape: return text_type(s).replace(text_type(old), text_type(new), count) if ( hasattr(old, "__html__") or hasattr(new, "__html__") and not hasattr(s, "__html__") ): s = escape(s) else: s = soft_unicode(s) return s.replace(soft_unicode(old), soft_unicode(new), count) def do_upper(s): """Convert a value to uppercase.""" return soft_unicode(s).upper() def do_lower(s): """Convert a value to lowercase.""" return soft_unicode(s).lower() @evalcontextfilter def do_xmlattr(_eval_ctx, d, autospace=True): """Create an SGML/XML attribute string based on the items in a dict. All values that are neither `none` nor `undefined` are automatically escaped: .. sourcecode:: html+jinja <ul{{ {'class': 'my_list', 'missing': none, 'id': 'list-%d'|format(variable)}|xmlattr }}> ... </ul> Results in something like this: .. sourcecode:: html <ul class="my_list" id="list-42"> ... </ul> As you can see it automatically prepends a space in front of the item if the filter returned something unless the second parameter is false. """ rv = u" ".join( u'%s="%s"' % (escape(key), escape(value)) for key, value in iteritems(d) if value is not None and not isinstance(value, Undefined) ) if autospace and rv: rv = u" " + rv if _eval_ctx.autoescape: rv = Markup(rv) return rv def do_capitalize(s): """Capitalize a value. The first character will be uppercase, all others lowercase. """ return soft_unicode(s).capitalize() def do_title(s): """Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase. """ return "".join( [ item[0].upper() + item[1:].lower() for item in _word_beginning_split_re.split(soft_unicode(s)) if item ] ) def do_dictsort(value, case_sensitive=False, by="key", reverse=False): """Sort a dict and yield (key, value) pairs. Because python dicts are unsorted you may want to use this function to order them by either key or value: .. 
sourcecode:: jinja {% for item in mydict|dictsort %} sort the dict by key, case insensitive {% for item in mydict|dictsort(reverse=true) %} sort the dict by key, case insensitive, reverse order {% for item in mydict|dictsort(true) %} sort the dict by key, case sensitive {% for item in mydict|dictsort(false, 'value') %} sort the dict by value, case insensitive """ if by == "key": pos = 0 elif by == "value": pos = 1 else: raise FilterArgumentError('You can only sort by either "key" or "value"') def sort_func(item): value = item[pos] if not case_sensitive: value = ignore_case(value) return value return sorted(value.items(), key=sort_func, reverse=reverse) @environmentfilter def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None): """Sort an iterable using Python's :func:`sorted`. .. sourcecode:: jinja {% for city in cities|sort %} ... {% endfor %} :param reverse: Sort descending instead of ascending. :param case_sensitive: When sorting strings, sort upper and lower case separately. :param attribute: When sorting objects or dicts, an attribute or key to sort by. Can use dot notation like ``"address.city"``. Can be a list of attributes like ``"age,name"``. The sort is stable, it does not change the relative order of elements that compare equal. This makes it possible to chain sorts on different attributes and ordering. .. sourcecode:: jinja {% for user in users|sort(attribute="name") |sort(reverse=true, attribute="age") %} ... {% endfor %} As a shortcut to chaining when the direction is the same for all attributes, pass a comma-separated list of attributes. .. sourcecode:: jinja {% for user in users|sort(attribute="age,name") %} ... {% endfor %} .. versionchanged:: 2.11.0 The ``attribute`` parameter can be a comma separated list of attributes, e.g. ``"age,name"``. .. versionchanged:: 2.6 The ``attribute`` parameter was added. """ key_func = make_multi_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return sorted(value, key=key_func, reverse=reverse) @environmentfilter def do_unique(environment, value, case_sensitive=False, attribute=None): """Returns a list of unique items from the given iterable. .. sourcecode:: jinja {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }} -> ['foo', 'bar', 'foobar'] The unique items are yielded in the same order as their first occurrence in the iterable passed to the filter. :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Filter objects with unique values for this attribute. """ getter = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) seen = set() for item in value: key = getter(item) if key not in seen: seen.add(key) yield item def _min_or_max(environment, value, func, case_sensitive, attribute): it = iter(value) try: first = next(it) except StopIteration: return environment.undefined("No aggregated item, sequence was empty.") key_func = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return func(chain([first], it), key=key_func) @environmentfilter def do_min(environment, value, case_sensitive=False, attribute=None): """Return the smallest item from the sequence. .. sourcecode:: jinja {{ [1, 2, 3]|min }} -> 1 :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Get the object with the min value of this attribute. 
""" return _min_or_max(environment, value, min, case_sensitive, attribute) @environmentfilter def do_max(environment, value, case_sensitive=False, attribute=None): """Return the largest item from the sequence. .. sourcecode:: jinja {{ [1, 2, 3]|max }} -> 3 :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Get the object with the max value of this attribute. """ return _min_or_max(environment, value, max, case_sensitive, attribute) def do_default(value, default_value=u"", boolean=False): """If the value is undefined it will return the passed default value, otherwise the value of the variable: .. sourcecode:: jinja {{ my_variable|default('my_variable is not defined') }} This will output the value of ``my_variable`` if the variable was defined, otherwise ``'my_variable is not defined'``. If you want to use default with variables that evaluate to false you have to set the second parameter to `true`: .. sourcecode:: jinja {{ ''|default('the string was empty', true) }} .. versionchanged:: 2.11 It's now possible to configure the :class:`~jinja2.Environment` with :class:`~jinja2.ChainableUndefined` to make the `default` filter work on nested elements and attributes that may contain undefined values in the chain without getting an :exc:`~jinja2.UndefinedError`. """ if isinstance(value, Undefined) or (boolean and not value): return default_value return value @evalcontextfilter def do_join(eval_ctx, value, d=u"", attribute=None): """Return a string which is the concatenation of the strings in the sequence. The separator between elements is an empty string per default, you can define it with the optional parameter: .. sourcecode:: jinja {{ [1, 2, 3]|join('|') }} -> 1|2|3 {{ [1, 2, 3]|join }} -> 123 It is also possible to join certain attributes of an object: .. sourcecode:: jinja {{ users|join(', ', attribute='username') }} .. versionadded:: 2.6 The `attribute` parameter was added. """ if attribute is not None: value = imap(make_attrgetter(eval_ctx.environment, attribute), value) # no automatic escaping? joining is a lot easier then if not eval_ctx.autoescape: return text_type(d).join(imap(text_type, value)) # if the delimiter doesn't have an html representation we check # if any of the items has. If yes we do a coercion to Markup if not hasattr(d, "__html__"): value = list(value) do_escape = False for idx, item in enumerate(value): if hasattr(item, "__html__"): do_escape = True else: value[idx] = text_type(item) if do_escape: d = escape(d) else: d = text_type(d) return d.join(value) # no html involved, to normal joining return soft_unicode(d).join(imap(soft_unicode, value)) def do_center(value, width=80): """Centers the value in a field of a given width.""" return text_type(value).center(width) @environmentfilter def do_first(environment, seq): """Return the first item of a sequence.""" try: return next(iter(seq)) except StopIteration: return environment.undefined("No first item, sequence was empty.") @environmentfilter def do_last(environment, seq): """ Return the last item of a sequence. Note: Does not work with generators. You may want to explicitly convert it to a list: .. 
sourcecode:: jinja {{ data | selectattr('name', '==', 'Jinja') | list | last }} """ try: return next(iter(reversed(seq))) except StopIteration: return environment.undefined("No last item, sequence was empty.") @contextfilter def do_random(context, seq): """Return a random item from the sequence.""" try: return random.choice(seq) except IndexError: return context.environment.undefined("No random item, sequence was empty.") def do_filesizeformat(value, binary=False): """Format the value like a 'human-readable' file size (i.e. 13 kB, 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega, Giga, etc.), if the second parameter is set to `True` the binary prefixes are used (Mebi, Gibi). """ bytes = float(value) base = binary and 1024 or 1000 prefixes = [ (binary and "KiB" or "kB"), (binary and "MiB" or "MB"), (binary and "GiB" or "GB"), (binary and "TiB" or "TB"), (binary and "PiB" or "PB"), (binary and "EiB" or "EB"), (binary and "ZiB" or "ZB"), (binary and "YiB" or "YB"), ] if bytes == 1: return "1 Byte" elif bytes < base: return "%d Bytes" % bytes else: for i, prefix in enumerate(prefixes): unit = base ** (i + 2) if bytes < unit: return "%.1f %s" % ((base * bytes / unit), prefix) return "%.1f %s" % ((base * bytes / unit), prefix) def do_pprint(value, verbose=False): """Pretty print a variable. Useful for debugging. With Jinja 1.2 onwards you can pass it a parameter. If this parameter is truthy the output will be more verbose (this requires `pretty`) """ return pformat(value, verbose=verbose) @evalcontextfilter def do_urlize( eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None ): """Converts URLs in plain text into clickable links. If you pass the filter an additional integer it will shorten the urls to that number. Also a third argument exists that makes the urls "nofollow": .. sourcecode:: jinja {{ mytext|urlize(40, true) }} links are shortened to 40 chars and defined with rel="nofollow" If *target* is specified, the ``target`` attribute will be added to the ``<a>`` tag: .. sourcecode:: jinja {{ mytext|urlize(40, target='_blank') }} .. versionchanged:: 2.8+ The *target* parameter was added. """ policies = eval_ctx.environment.policies rel = set((rel or "").split() or []) if nofollow: rel.add("nofollow") rel.update((policies["urlize.rel"] or "").split()) if target is None: target = policies["urlize.target"] rel = " ".join(sorted(rel)) or None rv = urlize(value, trim_url_limit, rel=rel, target=target) if eval_ctx.autoescape: rv = Markup(rv) return rv def do_indent(s, width=4, first=False, blank=False, indentfirst=None): """Return a copy of the string with each line indented by 4 spaces. The first line and blank lines are not indented by default. :param width: Number of spaces to indent by. :param first: Don't skip indenting the first line. :param blank: Don't skip indenting empty lines. .. versionchanged:: 2.10 Blank lines are not indented by default. Rename the ``indentfirst`` argument to ``first``. 
""" if indentfirst is not None: warnings.warn( "The 'indentfirst' argument is renamed to 'first' and will" " be removed in version 3.0.", DeprecationWarning, stacklevel=2, ) first = indentfirst indention = u" " * width newline = u"\n" if isinstance(s, Markup): indention = Markup(indention) newline = Markup(newline) s += newline # this quirk is necessary for splitlines method if blank: rv = (newline + indention).join(s.splitlines()) else: lines = s.splitlines() rv = lines.pop(0) if lines: rv += newline + newline.join( indention + line if line else line for line in lines ) if first: rv = indention + rv return rv @environmentfilter def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None): """Return a truncated copy of the string. The length is specified with the first parameter which defaults to ``255``. If the second parameter is ``true`` the filter will cut the text at length. Otherwise it will discard the last word. If the text was in fact truncated it will append an ellipsis sign (``"..."``). If you want a different ellipsis sign than ``"..."`` you can specify it using the third parameter. Strings that only exceed the length by the tolerance margin given in the fourth parameter will not be truncated. .. sourcecode:: jinja {{ "foo bar baz qux"|truncate(9) }} -> "foo..." {{ "foo bar baz qux"|truncate(9, True) }} -> "foo ba..." {{ "foo bar baz qux"|truncate(11) }} -> "foo bar baz qux" {{ "foo bar baz qux"|truncate(11, False, '...', 0) }} -> "foo bar..." The default leeway on newer Jinja versions is 5 and was 0 before but can be reconfigured globally. """ if leeway is None: leeway = env.policies["truncate.leeway"] assert length >= len(end), "expected length >= %s, got %s" % (len(end), length) assert leeway >= 0, "expected leeway >= 0, got %s" % leeway if len(s) <= length + leeway: return s if killwords: return s[: length - len(end)] + end result = s[: length - len(end)].rsplit(" ", 1)[0] return result + end @environmentfilter def do_wordwrap( environment, s, width=79, break_long_words=True, wrapstring=None, break_on_hyphens=True, ): """Wrap a string to the given width. Existing newlines are treated as paragraphs to be wrapped separately. :param s: Original text to wrap. :param width: Maximum length of wrapped lines. :param break_long_words: If a word is longer than ``width``, break it across lines. :param break_on_hyphens: If a word contains hyphens, it may be split across lines. :param wrapstring: String to join each wrapped line. Defaults to :attr:`Environment.newline_sequence`. .. versionchanged:: 2.11 Existing newlines are treated as paragraphs wrapped separately. .. versionchanged:: 2.11 Added the ``break_on_hyphens`` parameter. .. versionchanged:: 2.7 Added the ``wrapstring`` parameter. """ import textwrap if not wrapstring: wrapstring = environment.newline_sequence # textwrap.wrap doesn't consider existing newlines when wrapping. # If the string has a newline before width, wrap will still insert # a newline at width, resulting in a short line. Instead, split and # wrap each paragraph individually. return wrapstring.join( [ wrapstring.join( textwrap.wrap( line, width=width, expand_tabs=False, replace_whitespace=False, break_long_words=break_long_words, break_on_hyphens=break_on_hyphens, ) ) for line in s.splitlines() ] ) def do_wordcount(s): """Count the words in that string.""" return len(_word_re.findall(soft_unicode(s))) def do_int(value, default=0, base=10): """Convert the value into an integer. If the conversion doesn't work it will return ``0``. 
You can override this default using the first parameter. You can also override the default base (10) in the second parameter, which handles input with prefixes such as 0b, 0o and 0x for bases 2, 8 and 16 respectively. The base is ignored for decimal numbers and non-string values. """ try: if isinstance(value, string_types): return int(value, base) return int(value) except (TypeError, ValueError): # this quirk is necessary so that "42.23"|int gives 42. try: return int(float(value)) except (TypeError, ValueError): return default def do_float(value, default=0.0): """Convert the value into a floating point number. If the conversion doesn't work it will return ``0.0``. You can override this default using the first parameter. """ try: return float(value) except (TypeError, ValueError): return default def do_format(value, *args, **kwargs): """Apply the given values to a `printf-style`_ format string, like ``string % values``. .. sourcecode:: jinja {{ "%s, %s!"|format(greeting, name) }} Hello, World! In most cases it should be more convenient and efficient to use the ``%`` operator or :meth:`str.format`. .. code-block:: text {{ "%s, %s!" % (greeting, name) }} {{ "{}, {}!".format(greeting, name) }} .. _printf-style: https://docs.python.org/library/stdtypes.html #printf-style-string-formatting """ if args and kwargs: raise FilterArgumentError( "can't handle positional and keyword arguments at the same time" ) return soft_unicode(value) % (kwargs or args) def do_trim(value, chars=None): """Strip leading and trailing characters, by default whitespace.""" return soft_unicode(value).strip(chars) def do_striptags(value): """Strip SGML/XML tags and replace adjacent whitespace by one space.""" if hasattr(value, "__html__"): value = value.__html__() return Markup(text_type(value)).striptags() def do_slice(value, slices, fill_with=None): """Slice an iterator and return a list of lists containing those items. Useful if you want to create a div containing three ul tags that represent columns: .. sourcecode:: html+jinja <div class="columnwrapper"> {%- for column in items|slice(3) %} <ul class="column-{{ loop.index }}"> {%- for item in column %} <li>{{ item }}</li> {%- endfor %} </ul> {%- endfor %} </div> If you pass it a second argument it's used to fill missing values on the last iteration. """ seq = list(value) length = len(seq) items_per_slice = length // slices slices_with_extra = length % slices offset = 0 for slice_number in range(slices): start = offset + slice_number * items_per_slice if slice_number < slices_with_extra: offset += 1 end = offset + (slice_number + 1) * items_per_slice tmp = seq[start:end] if fill_with is not None and slice_number >= slices_with_extra: tmp.append(fill_with) yield tmp def do_batch(value, linecount, fill_with=None): """ A filter that batches items. It works pretty much like `slice` just the other way round. It returns a list of lists with the given number of items. If you provide a second parameter this is used to fill up missing items. See this example: .. sourcecode:: html+jinja <table> {%- for row in items|batch(3, '&nbsp;') %} <tr> {%- for column in row %} <td>{{ column }}</td> {%- endfor %} </tr> {%- endfor %} </table> """ tmp = [] for item in value: if len(tmp) == linecount: yield tmp tmp = [] tmp.append(item) if tmp: if fill_with is not None and len(tmp) < linecount: tmp += [fill_with] * (linecount - len(tmp)) yield tmp def do_round(value, precision=0, method="common"): """Round the number to a given precision. 
The first parameter specifies the precision (default is ``0``), the second the rounding method: - ``'common'`` rounds either up or down - ``'ceil'`` always rounds up - ``'floor'`` always rounds down If you don't specify a method ``'common'`` is used. .. sourcecode:: jinja {{ 42.55|round }} -> 43.0 {{ 42.55|round(1, 'floor') }} -> 42.5 Note that even if rounded to 0 precision, a float is returned. If you need a real integer, pipe it through `int`: .. sourcecode:: jinja {{ 42.55|round|int }} -> 43 """ if method not in {"common", "ceil", "floor"}: raise FilterArgumentError("method must be common, ceil or floor") if method == "common": return round(value, precision) func = getattr(math, method) return func(value * (10 ** precision)) / (10 ** precision) # Use a regular tuple repr here. This is what we did in the past and we # really want to hide this custom type as much as possible. In particular # we do not want to accidentally expose an auto generated repr in case # people start to print this out in comments or something similar for # debugging. _GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"]) _GroupTuple.__repr__ = tuple.__repr__ _GroupTuple.__str__ = tuple.__str__ @environmentfilter def do_groupby(environment, value, attribute): """Group a sequence of objects by an attribute using Python's :func:`itertools.groupby`. The attribute can use dot notation for nested access, like ``"address.city"``. Unlike Python's ``groupby``, the values are sorted first so only one group is returned for each unique value. For example, a list of ``User`` objects with a ``city`` attribute can be rendered in groups. In this example, ``grouper`` refers to the ``city`` value of the group. .. sourcecode:: html+jinja <ul>{% for city, items in users|groupby("city") %} <li>{{ city }} <ul>{% for user in items %} <li>{{ user.name }} {% endfor %}</ul> </li> {% endfor %}</ul> ``groupby`` yields namedtuples of ``(grouper, list)``, which can be used instead of the tuple unpacking above. ``grouper`` is the value of the attribute, and ``list`` is the items with that value. .. sourcecode:: html+jinja <ul>{% for group in users|groupby("city") %} <li>{{ group.grouper }}: {{ group.list|join(", ") }} {% endfor %}</ul> .. versionchanged:: 2.6 The attribute supports dot notation for nested access. """ expr = make_attrgetter(environment, attribute) return [ _GroupTuple(key, list(values)) for key, values in groupby(sorted(value, key=expr), expr) ] @environmentfilter def do_sum(environment, iterable, attribute=None, start=0): """Returns the sum of a sequence of numbers plus the value of parameter 'start' (which defaults to 0). When the sequence is empty it returns start. It is also possible to sum up only certain attributes: .. sourcecode:: jinja Total: {{ items|sum(attribute='price') }} .. versionchanged:: 2.6 The `attribute` parameter was added to allow suming up over attributes. Also the `start` parameter was moved on to the right. """ if attribute is not None: iterable = imap(make_attrgetter(environment, attribute), iterable) return sum(iterable, start) def do_list(value): """Convert the value into a list. If it was a string the returned list will be a list of characters. """ return list(value) def do_mark_safe(value): """Mark the value as safe which means that in an environment with automatic escaping enabled this variable will not be escaped. """ return Markup(value) def do_mark_unsafe(value): """Mark a value as unsafe. 
This is the reverse operation for :func:`safe`.""" return text_type(value) def do_reverse(value): """Reverse the object or return an iterator that iterates over it the other way round. """ if isinstance(value, string_types): return value[::-1] try: return reversed(value) except TypeError: try: rv = list(value) rv.reverse() return rv except TypeError: raise FilterArgumentError("argument must be iterable") @environmentfilter def do_attr(environment, obj, name): """Get an attribute of an object. ``foo|attr("bar")`` works like ``foo.bar`` just that always an attribute is returned and items are not looked up. See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details. """ try: name = str(name) except UnicodeError: pass else: try: value = getattr(obj, name) except AttributeError: pass else: if environment.sandboxed and not environment.is_safe_attribute( obj, name, value ): return environment.unsafe_undefined(obj, name) return value return environment.undefined(obj=obj, name=name) @contextfilter def do_map(*args, **kwargs): """Applies a filter on a sequence of objects or looks up an attribute. This is useful when dealing with lists of objects but you are really only interested in a certain value of it. The basic usage is mapping on an attribute. Imagine you have a list of users but you are only interested in a list of usernames: .. sourcecode:: jinja Users on this page: {{ users|map(attribute='username')|join(', ') }} You can specify a ``default`` value to use if an object in the list does not have the given attribute. .. sourcecode:: jinja {{ users|map(attribute="username", default="Anonymous")|join(", ") }} Alternatively you can let it invoke a filter by passing the name of the filter and the arguments afterwards. A good example would be applying a text conversion filter on a sequence: .. sourcecode:: jinja Users on this page: {{ titles|map('lower')|join(', ') }} Similar to a generator comprehension such as: .. code-block:: python (u.username for u in users) (u.username or "Anonymous" for u in users) (do_lower(x) for x in titles) .. versionchanged:: 2.11.0 Added the ``default`` parameter. .. versionadded:: 2.7 """ seq, func = prepare_map(args, kwargs) if seq: for item in seq: yield func(item) @contextfilter def do_select(*args, **kwargs): """Filters a sequence of objects by applying a test to each object, and only selecting the objects with the test succeeding. If no test is specified, each object will be evaluated as a boolean. Example usage: .. sourcecode:: jinja {{ numbers|select("odd") }} {{ numbers|select("odd") }} {{ numbers|select("divisibleby", 3) }} {{ numbers|select("lessthan", 42) }} {{ strings|select("equalto", "mystring") }} Similar to a generator comprehension such as: .. code-block:: python (n for n in numbers if test_odd(n)) (n for n in numbers if test_divisibleby(n, 3)) .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: x, False) @contextfilter def do_reject(*args, **kwargs): """Filters a sequence of objects by applying a test to each object, and rejecting the objects with the test succeeding. If no test is specified, each object will be evaluated as a boolean. Example usage: .. sourcecode:: jinja {{ numbers|reject("odd") }} Similar to a generator comprehension such as: .. code-block:: python (n for n in numbers if not test_odd(n)) .. 
versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: not x, False) @contextfilter def do_selectattr(*args, **kwargs): """Filters a sequence of objects by applying a test to the specified attribute of each object, and only selecting the objects with the test succeeding. If no test is specified, the attribute's value will be evaluated as a boolean. Example usage: .. sourcecode:: jinja {{ users|selectattr("is_active") }} {{ users|selectattr("email", "none") }} Similar to a generator comprehension such as: .. code-block:: python (u for user in users if user.is_active) (u for user in users if test_none(user.email)) .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: x, True) @contextfilter def do_rejectattr(*args, **kwargs): """Filters a sequence of objects by applying a test to the specified attribute of each object, and rejecting the objects with the test succeeding. If no test is specified, the attribute's value will be evaluated as a boolean. .. sourcecode:: jinja {{ users|rejectattr("is_active") }} {{ users|rejectattr("email", "none") }} Similar to a generator comprehension such as: .. code-block:: python (u for user in users if not user.is_active) (u for user in users if not test_none(user.email)) .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: not x, True) @evalcontextfilter def do_tojson(eval_ctx, value, indent=None): """Dumps a structure to JSON so that it's safe to use in ``<script>`` tags. It accepts the same arguments and returns a JSON string. Note that this is available in templates through the ``|tojson`` filter which will also mark the result as safe. Due to how this function escapes certain characters this is safe even if used outside of ``<script>`` tags. The following characters are escaped in strings: - ``<`` - ``>`` - ``&`` - ``'`` This makes it safe to embed such strings in any place in HTML with the notable exception of double quoted attributes. In that case single quote your attributes or HTML escape it in addition. The indent parameter can be used to enable pretty printing. Set it to the number of spaces that the structures should be indented with. Note that this filter is for use in HTML contexts only. .. 
versionadded:: 2.9 """ policies = eval_ctx.environment.policies dumper = policies["json.dumps_function"] options = policies["json.dumps_kwargs"] if indent is not None: options = dict(options) options["indent"] = indent return htmlsafe_json_dumps(value, dumper=dumper, **options) def prepare_map(args, kwargs): context = args[0] seq = args[1] default = None if len(args) == 2 and "attribute" in kwargs: attribute = kwargs.pop("attribute") default = kwargs.pop("default", None) if kwargs: raise FilterArgumentError( "Unexpected keyword argument %r" % next(iter(kwargs)) ) func = make_attrgetter(context.environment, attribute, default=default) else: try: name = args[2] args = args[3:] except LookupError: raise FilterArgumentError("map requires a filter argument") def func(item): return context.environment.call_filter( name, item, args, kwargs, context=context ) return seq, func def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr): context = args[0] seq = args[1] if lookup_attr: try: attr = args[2] except LookupError: raise FilterArgumentError("Missing parameter for attribute name") transfunc = make_attrgetter(context.environment, attr) off = 1 else: off = 0 def transfunc(x): return x try: name = args[2 + off] args = args[3 + off :] def func(item): return context.environment.call_test(name, item, args, kwargs) except LookupError: func = bool return seq, lambda item: modfunc(func(transfunc(item))) def select_or_reject(args, kwargs, modfunc, lookup_attr): seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr) if seq: for item in seq: if func(item): yield item FILTERS = { "abs": abs, "attr": do_attr, "batch": do_batch, "capitalize": do_capitalize, "center": do_center, "count": len, "d": do_default, "default": do_default, "dictsort": do_dictsort, "e": escape, "escape": escape, "filesizeformat": do_filesizeformat, "first": do_first, "float": do_float, "forceescape": do_forceescape, "format": do_format, "groupby": do_groupby, "indent": do_indent, "int": do_int, "join": do_join, "last": do_last, "length": len, "list": do_list, "lower": do_lower, "map": do_map, "min": do_min, "max": do_max, "pprint": do_pprint, "random": do_random, "reject": do_reject, "rejectattr": do_rejectattr, "replace": do_replace, "reverse": do_reverse, "round": do_round, "safe": do_mark_safe, "select": do_select, "selectattr": do_selectattr, "slice": do_slice, "sort": do_sort, "string": soft_unicode, "striptags": do_striptags, "sum": do_sum, "title": do_title, "trim": do_trim, "truncate": do_truncate, "unique": do_unique, "upper": do_upper, "urlencode": do_urlencode, "urlize": do_urlize, "wordcount": do_wordcount, "wordwrap": do_wordwrap, "xmlattr": do_xmlattr, "tojson": do_tojson, }
0
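The filter plumbing in filters.py is easiest to verify end to end: register a plain callable under a name in ``Environment.filters`` (the same mapping the module-level ``FILTERS`` dict seeds) and exercise ``do_sort``'s comma-separated ``attribute`` form, which is routed through ``make_multi_attrgetter``. A minimal sketch follows; the ``reverse_words`` filter and the ``users`` data are hypothetical, invented here for illustration only.

.. code-block:: python

    from jinja2 import Environment

    env = Environment()

    # Hypothetical custom filter, registered the same way FILTERS
    # wires built-in names to callables.
    def reverse_words(value):
        """Reverse the word order of a string."""
        return " ".join(reversed(value.split()))

    env.filters["reverse_words"] = reverse_words
    print(env.from_string("{{ 'hello there world'|reverse_words }}").render())
    # -> world there hello

    # make_multi_attrgetter in action: sort by age, then name,
    # case-insensitively (the default).
    users = [
        {"name": "cy", "age": 29},
        {"name": "ada", "age": 36},
        {"name": "Bob", "age": 29},
    ]
    tmpl = env.from_string(
        "{% for u in users|sort(attribute='age,name') %}{{ u.name }} {% endfor %}"
    )
    print(tmpl.render(users=users))  # -> Bob cy ada

Because ``do_sort`` is stable, chaining single-attribute sorts as its docstring describes produces the same ordering as the comma-separated shortcut, provided every attribute sorts in the same direction.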
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/ext.py
# -*- coding: utf-8 -*- """Extension API for adding custom tags and behavior.""" import pprint import re from sys import version_info from markupsafe import Markup from . import nodes from ._compat import iteritems from ._compat import string_types from ._compat import with_metaclass from .defaults import BLOCK_END_STRING from .defaults import BLOCK_START_STRING from .defaults import COMMENT_END_STRING from .defaults import COMMENT_START_STRING from .defaults import KEEP_TRAILING_NEWLINE from .defaults import LINE_COMMENT_PREFIX from .defaults import LINE_STATEMENT_PREFIX from .defaults import LSTRIP_BLOCKS from .defaults import NEWLINE_SEQUENCE from .defaults import TRIM_BLOCKS from .defaults import VARIABLE_END_STRING from .defaults import VARIABLE_START_STRING from .environment import Environment from .exceptions import TemplateAssertionError from .exceptions import TemplateSyntaxError from .nodes import ContextReference from .runtime import concat from .utils import contextfunction from .utils import import_string # the only really useful gettext functions for a Jinja template. Note # that ugettext must be assigned to gettext as Jinja doesn't support # non-unicode strings. GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext") _ws_re = re.compile(r"\s*\n\s*") class ExtensionRegistry(type): """Gives the extension a unique identifier.""" def __new__(mcs, name, bases, d): rv = type.__new__(mcs, name, bases, d) rv.identifier = rv.__module__ + "." + rv.__name__ return rv class Extension(with_metaclass(ExtensionRegistry, object)): """Extensions can be used to add extra functionality to the Jinja template system at the parser level. Custom extensions are bound to an environment but may not store environment specific data on `self`. The reason for this is that an extension can be bound to another environment (for overlays) by creating a copy and reassigning the `environment` attribute. As extensions are created by the environment they cannot accept any arguments for configuration. One may want to work around that by using a factory function, but that is not possible as extensions are identified by their import name. The correct way to configure the extension is storing the configuration values on the environment. Because this way the environment ends up acting as central configuration storage the attributes may clash which is why extensions have to ensure that the names they choose for configuration are not too generic. ``prefix`` for example is a terrible name, ``fragment_cache_prefix`` on the other hand is a good name as it includes the name of the extension (fragment cache). """ #: if this extension parses, this is the list of tags it's listening to. tags = set() #: the priority of that extension. This is especially useful for #: extensions that preprocess values. A lower value means higher #: priority. #: #: .. versionadded:: 2.4 priority = 100 def __init__(self, environment): self.environment = environment def bind(self, environment): """Create a copy of this extension bound to another environment.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.environment = environment return rv def preprocess(self, source, name, filename=None): """This method is called before the actual lexing and can be used to preprocess the source. The `filename` is optional. The return value must be the preprocessed source. """ return source def filter_stream(self, stream): """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used to filter tokens returned. 
This method has to return an iterable of :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a :class:`~jinja2.lexer.TokenStream`. """ return stream def parse(self, parser): """If any of the :attr:`tags` matched this method is called with the parser as first argument. The token the parser stream is pointing at is the name token that matched. This method has to return one or a list of multiple nodes. """ raise NotImplementedError() def attr(self, name, lineno=None): """Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code. :: self.attr('_my_attribute', lineno=lineno) """ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) def call_method( self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None ): """Call a method of the extension. This is a shortcut for :meth:`attr` + :class:`jinja2.nodes.Call`. """ if args is None: args = [] if kwargs is None: kwargs = [] return nodes.Call( self.attr(name, lineno=lineno), args, kwargs, dyn_args, dyn_kwargs, lineno=lineno, ) @contextfunction def _gettext_alias(__context, *args, **kwargs): return __context.call(__context.resolve("gettext"), *args, **kwargs) def _make_new_gettext(func): @contextfunction def gettext(__context, __string, **variables): rv = __context.call(func, __string) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, even if there are no # variables. This makes translation strings more consistent # and predictable. This requires escaping return rv % variables return gettext def _make_new_ngettext(func): @contextfunction def ngettext(__context, __singular, __plural, __num, **variables): variables.setdefault("num", __num) rv = __context.call(func, __singular, __plural, __num) if __context.eval_ctx.autoescape: rv = Markup(rv) # Always treat as a format string, see gettext comment above. return rv % variables return ngettext class InternationalizationExtension(Extension): """This extension adds gettext support to Jinja.""" tags = {"trans"} # TODO: the i18n extension is currently reevaluating values in a few # situations. Take this example: # {% trans count=something() %}{{ count }} foo{% pluralize # %}{{ count }} fooss{% endtrans %} # something is called twice here. One time for the gettext value and # the other time for the n-parameter of the ngettext function. 
def __init__(self, environment): Extension.__init__(self, environment) environment.globals["_"] = _gettext_alias environment.extend( install_gettext_translations=self._install, install_null_translations=self._install_null, install_gettext_callables=self._install_callables, uninstall_gettext_translations=self._uninstall, extract_translations=self._extract, newstyle_gettext=False, ) def _install(self, translations, newstyle=None): gettext = getattr(translations, "ugettext", None) if gettext is None: gettext = translations.gettext ngettext = getattr(translations, "ungettext", None) if ngettext is None: ngettext = translations.ngettext self._install_callables(gettext, ngettext, newstyle) def _install_null(self, newstyle=None): self._install_callables( lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle ) def _install_callables(self, gettext, ngettext, newstyle=None): if newstyle is not None: self.environment.newstyle_gettext = newstyle if self.environment.newstyle_gettext: gettext = _make_new_gettext(gettext) ngettext = _make_new_ngettext(ngettext) self.environment.globals.update(gettext=gettext, ngettext=ngettext) def _uninstall(self, translations): for key in "gettext", "ngettext": self.environment.globals.pop(key, None) def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS): if isinstance(source, string_types): source = self.environment.parse(source) return extract_from_ast(source, gettext_functions) def parse(self, parser): """Parse a translatable tag.""" lineno = next(parser.stream).lineno num_called_num = False # find all the variables referenced. Additionally a variable can be # defined in the body of the trans block too, but this is checked at # a later state. plural_expr = None plural_expr_assignment = None variables = {} trimmed = None while parser.stream.current.type != "block_end": if variables: parser.stream.expect("comma") # skip colon for python compatibility if parser.stream.skip_if("colon"): break name = parser.stream.expect("name") if name.value in variables: parser.fail( "translatable variable %r defined twice." 
% name.value, name.lineno, exc=TemplateAssertionError, ) # expressions if parser.stream.current.type == "assign": next(parser.stream) variables[name.value] = var = parser.parse_expression() elif trimmed is None and name.value in ("trimmed", "notrimmed"): trimmed = name.value == "trimmed" continue else: variables[name.value] = var = nodes.Name(name.value, "load") if plural_expr is None: if isinstance(var, nodes.Call): plural_expr = nodes.Name("_trans", "load") variables[name.value] = plural_expr plural_expr_assignment = nodes.Assign( nodes.Name("_trans", "store"), var ) else: plural_expr = var num_called_num = name.value == "num" parser.stream.expect("block_end") plural = None have_plural = False referenced = set() # now parse until endtrans or pluralize singular_names, singular = self._parse_block(parser, True) if singular_names: referenced.update(singular_names) if plural_expr is None: plural_expr = nodes.Name(singular_names[0], "load") num_called_num = singular_names[0] == "num" # if we have a pluralize block, we parse that too if parser.stream.current.test("name:pluralize"): have_plural = True next(parser.stream) if parser.stream.current.type != "block_end": name = parser.stream.expect("name") if name.value not in variables: parser.fail( "unknown variable %r for pluralization" % name.value, name.lineno, exc=TemplateAssertionError, ) plural_expr = variables[name.value] num_called_num = name.value == "num" parser.stream.expect("block_end") plural_names, plural = self._parse_block(parser, False) next(parser.stream) referenced.update(plural_names) else: next(parser.stream) # register free names as simple name expressions for var in referenced: if var not in variables: variables[var] = nodes.Name(var, "load") if not have_plural: plural_expr = None elif plural_expr is None: parser.fail("pluralize without variables", lineno) if trimmed is None: trimmed = self.environment.policies["ext.i18n.trimmed"] if trimmed: singular = self._trim_whitespace(singular) if plural: plural = self._trim_whitespace(plural) node = self._make_node( singular, plural, variables, plural_expr, bool(referenced), num_called_num and have_plural, ) node.set_lineno(lineno) if plural_expr_assignment is not None: return [plural_expr_assignment, node] else: return node def _trim_whitespace(self, string, _ws_re=_ws_re): return _ws_re.sub(" ", string.strip()) def _parse_block(self, parser, allow_pluralize): """Parse until the next block tag with a given name.""" referenced = [] buf = [] while 1: if parser.stream.current.type == "data": buf.append(parser.stream.current.value.replace("%", "%%")) next(parser.stream) elif parser.stream.current.type == "variable_begin": next(parser.stream) name = parser.stream.expect("name").value referenced.append(name) buf.append("%%(%s)s" % name) parser.stream.expect("variable_end") elif parser.stream.current.type == "block_begin": next(parser.stream) if parser.stream.current.test("name:endtrans"): break elif parser.stream.current.test("name:pluralize"): if allow_pluralize: break parser.fail( "a translatable section can have only one pluralize section" ) parser.fail( "control structures in translatable sections are not allowed" ) elif parser.stream.eos: parser.fail("unclosed translation block") else: raise RuntimeError("internal parser error") return referenced, concat(buf) def _make_node( self, singular, plural, variables, plural_expr, vars_referenced, num_called_num ): """Generates a useful node from the data provided.""" # no variables referenced? 
no need to escape for old style # gettext invocations only if there are vars. if not vars_referenced and not self.environment.newstyle_gettext: singular = singular.replace("%%", "%") if plural: plural = plural.replace("%%", "%") # singular only: if plural_expr is None: gettext = nodes.Name("gettext", "load") node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None) # singular and plural else: ngettext = nodes.Name("ngettext", "load") node = nodes.Call( ngettext, [nodes.Const(singular), nodes.Const(plural), plural_expr], [], None, None, ) # in case newstyle gettext is used, the method is powerful # enough to handle the variable expansion and autoescape # handling itself if self.environment.newstyle_gettext: for key, value in iteritems(variables): # the function adds that later anyways in case num was # called num, so just skip it. if num_called_num and key == "num": continue node.kwargs.append(nodes.Keyword(key, value)) # otherwise do that here else: # mark the return value as safe if we are in an # environment with autoescaping turned on node = nodes.MarkSafeIfAutoescape(node) if variables: node = nodes.Mod( node, nodes.Dict( [ nodes.Pair(nodes.Const(key), value) for key, value in variables.items() ] ), ) return nodes.Output([node]) class ExprStmtExtension(Extension): """Adds a `do` tag to Jinja that works like the print statement just that it doesn't print the return value. """ tags = set(["do"]) def parse(self, parser): node = nodes.ExprStmt(lineno=next(parser.stream).lineno) node.node = parser.parse_tuple() return node class LoopControlExtension(Extension): """Adds break and continue to the template engine.""" tags = set(["break", "continue"]) def parse(self, parser): token = next(parser.stream) if token.value == "break": return nodes.Break(lineno=token.lineno) return nodes.Continue(lineno=token.lineno) class WithExtension(Extension): pass class AutoEscapeExtension(Extension): pass class DebugExtension(Extension): """A ``{% debug %}`` tag that dumps the available variables, filters, and tests. .. code-block:: html+jinja <pre>{% debug %}</pre> .. code-block:: text {'context': {'cycler': <class 'jinja2.utils.Cycler'>, ..., 'namespace': <class 'jinja2.utils.Namespace'>}, 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd', ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'], 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined', ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']} .. versionadded:: 2.11.0 """ tags = {"debug"} def parse(self, parser): lineno = parser.stream.expect("name:debug").lineno context = ContextReference() result = self.call_method("_render", [context], lineno=lineno) return nodes.Output([result], lineno=lineno) def _render(self, context): result = { "context": context.get_all(), "filters": sorted(self.environment.filters.keys()), "tests": sorted(self.environment.tests.keys()), } # Set the depth since the intent is to show the top few names. if version_info[:2] >= (3, 4): return pprint.pformat(result, depth=3, compact=True) else: return pprint.pformat(result, depth=3) def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True): """Extract localizable strings from the given template node. Per default this function returns matches in babel style that means non string parameters as well as keyword arguments are returned as `None`. This allows Babel to figure out what you really meant if you are using gettext functions that allow keyword arguments for placeholder expansion. 
If you don't want that behavior set the `babel_style` parameter to `False` which causes only strings to be returned and parameters are always stored in tuples. As a consequence invalid gettext calls (calls without a single string parameter or string parameters after non-string parameters) are skipped. This example explains the behavior: >>> from jinja2 import Environment >>> env = Environment() >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') >>> list(extract_from_ast(node)) [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] >>> list(extract_from_ast(node, babel_style=False)) [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] For every string found this function yields a ``(lineno, function, message)`` tuple, where: * ``lineno`` is the number of the line on which the string was found, * ``function`` is the name of the ``gettext`` function used (if the string was extracted from embedded Python code), and * ``message`` is the string itself (a ``unicode`` object, or a tuple of ``unicode`` objects for functions with multiple string arguments). This extraction function operates on the AST and is because of that unable to extract any comments. For comment support you have to use the babel extraction interface or extract comments yourself. """ for node in node.find_all(nodes.Call): if ( not isinstance(node.node, nodes.Name) or node.node.name not in gettext_functions ): continue strings = [] for arg in node.args: if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types): strings.append(arg.value) else: strings.append(None) for _ in node.kwargs: strings.append(None) if node.dyn_args is not None: strings.append(None) if node.dyn_kwargs is not None: strings.append(None) if not babel_style: strings = tuple(x for x in strings if x is not None) if not strings: continue else: if len(strings) == 1: strings = strings[0] else: strings = tuple(strings) yield node.lineno, node.node.name, strings class _CommentFinder(object): """Helper class to find comments in a token stream. Can only find comments for gettext calls forwards. Once the comment from line 4 is found, a comment for line 1 will not return a usable value. """ def __init__(self, tokens, comment_tags): self.tokens = tokens self.comment_tags = comment_tags self.offset = 0 self.last_lineno = 0 def find_backwards(self, offset): try: for _, token_type, token_value in reversed( self.tokens[self.offset : offset] ): if token_type in ("comment", "linecomment"): try: prefix, comment = token_value.split(None, 1) except ValueError: continue if prefix in self.comment_tags: return [comment.rstrip()] return [] finally: self.offset = offset def find_comments(self, lineno): if not self.comment_tags or self.last_lineno > lineno: return [] for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]): if token_lineno > lineno: return self.find_backwards(self.offset + idx) return self.find_backwards(len(self.tokens)) def babel_extract(fileobj, keywords, comment_tags, options): """Babel extraction method for Jinja templates. .. versionchanged:: 2.3 Basic support for translation comments was added. If `comment_tags` is now set to a list of keywords for extraction, the extractor will try to find the best preceding comment that begins with one of the keywords. For best results, make sure to not have more than one gettext call in one line of code and the matching comment in the same line or the line before. .. 
versionchanged:: 2.5.1 The `newstyle_gettext` flag can be set to `True` to enable newstyle gettext calls. .. versionchanged:: 2.7 A `silent` option can now be provided. If set to `False` template syntax errors are propagated instead of being ignored. :param fileobj: the file-like object the messages should be extracted from :param keywords: a list of keywords (i.e. function names) that should be recognized as translation functions :param comment_tags: a list of translator tags to search for and include in the results. :param options: a dictionary of additional options (optional) :return: an iterator over ``(lineno, funcname, message, comments)`` tuples. (comments will be empty currently) """ extensions = set() for extension in options.get("extensions", "").split(","): extension = extension.strip() if not extension: continue extensions.add(import_string(extension)) if InternationalizationExtension not in extensions: extensions.add(InternationalizationExtension) def getbool(options, key, default=False): return options.get(key, str(default)).lower() in ("1", "on", "yes", "true") silent = getbool(options, "silent", True) environment = Environment( options.get("block_start_string", BLOCK_START_STRING), options.get("block_end_string", BLOCK_END_STRING), options.get("variable_start_string", VARIABLE_START_STRING), options.get("variable_end_string", VARIABLE_END_STRING), options.get("comment_start_string", COMMENT_START_STRING), options.get("comment_end_string", COMMENT_END_STRING), options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX, options.get("line_comment_prefix") or LINE_COMMENT_PREFIX, getbool(options, "trim_blocks", TRIM_BLOCKS), getbool(options, "lstrip_blocks", LSTRIP_BLOCKS), NEWLINE_SEQUENCE, getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE), frozenset(extensions), cache_size=0, auto_reload=False, ) if getbool(options, "trimmed"): environment.policies["ext.i18n.trimmed"] = True if getbool(options, "newstyle_gettext"): environment.newstyle_gettext = True source = fileobj.read().decode(options.get("encoding", "utf-8")) try: node = environment.parse(source) tokens = list(environment.lex(environment.preprocess(source))) except TemplateSyntaxError: if not silent: raise # skip templates with syntax errors return finder = _CommentFinder(tokens, comment_tags) for lineno, func, message in extract_from_ast(node, keywords): yield lineno, func, message, finder.find_comments(lineno) #: nicer import names i18n = InternationalizationExtension do = ExprStmtExtension loopcontrols = LoopControlExtension with_ = WithExtension autoescape = AutoEscapeExtension debug = DebugExtension
0
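The ``Extension`` API in ext.py is clearest with a tiny custom tag. The sketch below assumes only the public ``jinja2.ext`` surface; the ``{% upper %}`` tag and the ``_shout`` helper are invented for illustration and are not part of Jinja.

.. code-block:: python

    from jinja2 import Environment, nodes
    from jinja2.ext import Extension

    class UpperExtension(Extension):
        """Hypothetical {% upper %}...{% endupper %} block tag."""

        tags = {"upper"}

        def parse(self, parser):
            # the stream points at the "upper" name token; consume it
            lineno = next(parser.stream).lineno
            # parse the wrapped body up to (and dropping) {% endupper %}
            body = parser.parse_statements(["name:endupper"], drop_needle=True)
            # route the rendered body through _shout at runtime
            return nodes.CallBlock(
                self.call_method("_shout"), [], [], body, lineno=lineno
            )

        def _shout(self, caller):
            # caller() renders the wrapped block to a string
            return caller().upper()

    env = Environment(extensions=[UpperExtension])
    print(env.from_string("{% upper %}quiet, please{% endupper %}").render())
    # -> QUIET, PLEASE

``call_method`` is the shortcut documented above: it builds an ``ExtensionAttribute`` node plus a ``nodes.Call`` so the compiled template can reach back into the extension instance at render time.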
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/meta.py
# -*- coding: utf-8 -*- """Functions that expose information about templates that might be interesting for introspection. """ from . import nodes from ._compat import iteritems from ._compat import string_types from .compiler import CodeGenerator class TrackingCodeGenerator(CodeGenerator): """We abuse the code generator for introspection.""" def __init__(self, environment): CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>") self.undeclared_identifiers = set() def write(self, x): """Don't write.""" def enter_frame(self, frame): """Remember all undeclared identifiers.""" CodeGenerator.enter_frame(self, frame) for _, (action, param) in iteritems(frame.symbols.loads): if action == "resolve" and param not in self.environment.globals: self.undeclared_identifiers.add(param) def find_undeclared_variables(ast): """Returns a set of all variables in the AST that will be looked up from the context at runtime. Because at compile time it's not known which variables will be used depending on the path the execution takes at runtime, all variables are returned. >>> from jinja2 import Environment, meta >>> env = Environment() >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}') >>> meta.find_undeclared_variables(ast) == set(['bar']) True .. admonition:: Implementation Internally the code generator is used for finding undeclared variables. This is good to know because the code generator might raise a :exc:`TemplateAssertionError` during compilation and as a matter of fact this function can currently raise that exception as well. """ codegen = TrackingCodeGenerator(ast.environment) codegen.visit(ast) return codegen.undeclared_identifiers def find_referenced_templates(ast): """Finds all the referenced templates from the AST. This will return an iterator over all the hardcoded template extensions, inclusions and imports. If dynamic inheritance or inclusion is used, `None` will be yielded. >>> from jinja2 import Environment, meta >>> env = Environment() >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}') >>> list(meta.find_referenced_templates(ast)) ['layout.html', None] This function is useful for dependency tracking. For example if you want to rebuild parts of the website after a layout template has changed. """ for node in ast.find_all( (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include) ): if not isinstance(node.template, nodes.Const): # a tuple with some non consts in there if isinstance(node.template, (nodes.Tuple, nodes.List)): for template_name in node.template.items: # something const, only yield the strings and ignore # non-string consts that really just make no sense if isinstance(template_name, nodes.Const): if isinstance(template_name.value, string_types): yield template_name.value # something dynamic in there else: yield None # something dynamic we don't know about here else: yield None continue # constant is a basestring, direct template name if isinstance(node.template.value, string_types): yield node.template.value # a tuple or list (latter *should* not happen) made of consts, # yield the consts that are strings. We could warn here for # non string values elif isinstance(node, nodes.Include) and isinstance( node.template.value, (tuple, list) ): for template_name in node.template.value: if isinstance(template_name, string_types): yield template_name # something else we don't care about, we could warn here else: yield None
0
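Both helpers in meta.py are pure AST walks, so they can be tried with nothing but an ``Environment``. A small sketch; the template strings and the ``helper`` variable are made up for illustration:

.. code-block:: python

    from jinja2 import Environment, meta

    env = Environment()

    ast = env.parse("{% set foo = 42 %}{{ bar + foo }}")
    print(meta.find_undeclared_variables(ast))  # -> {'bar'}

    ast = env.parse("{% extends 'layout.html' %}{% include ['a.html', helper] %}")
    print(list(meta.find_referenced_templates(ast)))
    # -> ['layout.html', 'a.html', None]  (None marks the dynamic ``helper``)

The ``None`` entries are the documented signal for dynamic inheritance or inclusion, which is why dependency trackers built on this function must treat them as "could be any template".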
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/jinja2/asyncfilters.py
from functools import wraps from . import filters from .asyncsupport import auto_aiter from .asyncsupport import auto_await async def auto_to_seq(value): seq = [] if hasattr(value, "__aiter__"): async for item in value: seq.append(item) else: for item in value: seq.append(item) return seq async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr) if seq: async for item in auto_aiter(seq): if func(item): yield item def dualfilter(normal_filter, async_filter): wrap_evalctx = False if getattr(normal_filter, "environmentfilter", False) is True: def is_async(args): return args[0].is_async wrap_evalctx = False else: has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True wrap_evalctx = not has_evalctxfilter and not has_ctxfilter def is_async(args): return args[0].environment.is_async @wraps(normal_filter) def wrapper(*args, **kwargs): b = is_async(args) if wrap_evalctx: args = args[1:] if b: return async_filter(*args, **kwargs) return normal_filter(*args, **kwargs) if wrap_evalctx: wrapper.evalcontextfilter = True wrapper.asyncfiltervariant = True return wrapper def asyncfiltervariant(original): def decorator(f): return dualfilter(original, f) return decorator @asyncfiltervariant(filters.do_first) async def do_first(environment, seq): try: return await auto_aiter(seq).__anext__() except StopAsyncIteration: return environment.undefined("No first item, sequence was empty.") @asyncfiltervariant(filters.do_groupby) async def do_groupby(environment, value, attribute): expr = filters.make_attrgetter(environment, attribute) return [ filters._GroupTuple(key, await auto_to_seq(values)) for key, values in filters.groupby( sorted(await auto_to_seq(value), key=expr), expr ) ] @asyncfiltervariant(filters.do_join) async def do_join(eval_ctx, value, d=u"", attribute=None): return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) @asyncfiltervariant(filters.do_list) async def do_list(value): return await auto_to_seq(value) @asyncfiltervariant(filters.do_reject) async def do_reject(*args, **kwargs): return async_select_or_reject(args, kwargs, lambda x: not x, False) @asyncfiltervariant(filters.do_rejectattr) async def do_rejectattr(*args, **kwargs): return async_select_or_reject(args, kwargs, lambda x: not x, True) @asyncfiltervariant(filters.do_select) async def do_select(*args, **kwargs): return async_select_or_reject(args, kwargs, lambda x: x, False) @asyncfiltervariant(filters.do_selectattr) async def do_selectattr(*args, **kwargs): return async_select_or_reject(args, kwargs, lambda x: x, True) @asyncfiltervariant(filters.do_map) async def do_map(*args, **kwargs): seq, func = filters.prepare_map(args, kwargs) if seq: async for item in auto_aiter(seq): yield await auto_await(func(item)) @asyncfiltervariant(filters.do_sum) async def do_sum(environment, iterable, attribute=None, start=0): rv = start if attribute is not None: func = filters.make_attrgetter(environment, attribute) else: def func(x): return x async for item in auto_aiter(iterable): rv += func(item) return rv @asyncfiltervariant(filters.do_slice) async def do_slice(value, slices, fill_with=None): return filters.do_slice(await auto_to_seq(value), slices, fill_with) ASYNC_FILTERS = { "first": do_first, "groupby": do_groupby, "join": do_join, "list": do_list, # we intentionally do not support do_last because that would be # ridiculous "reject": 
do_reject, "rejectattr": do_rejectattr, "map": do_map, "select": do_select, "selectattr": do_selectattr, "sum": do_sum, "slice": do_slice, }
0
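To see how the async filter variants above are actually exercised, here is a hedged sketch assuming Jinja2 2.x with ``enable_async=True`` on Python 3.7+; ``fetch_prices`` is a hypothetical stand-in for real async I/O:

# Sketch: a built-in filter consuming an async iterable via ASYNC_FILTERS.
import asyncio
from jinja2 import Environment

async def fetch_prices():
    # Hypothetical async source; real code might await a database or API.
    for price in (3, 4, 5):
        yield price

env = Environment(enable_async=True)
template = env.from_string("total: {{ prices | sum }}")

async def main():
    # The async do_sum above drains the async generator via auto_aiter.
    print(await template.render_async(prices=fetch_prices()))

asyncio.run(main())  # prints: total: 12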
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/RECORD
../../Scripts/pycodestyle.exe,sha256=fdbGTYHwCLCxptp601QtYwNwlJUqKY7-_m8PQ_sQTDA,106414 __pycache__/pycodestyle.cpython-39.pyc,, pycodestyle-2.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 pycodestyle-2.7.0.dist-info/LICENSE,sha256=93IpXoGvNHjTTojlLQdiACMOx91qOeEjvFyzWqZqva4,1254 pycodestyle-2.7.0.dist-info/METADATA,sha256=WdOOoSuCvtYW3th4O3eKe-d995kHd_QNbJVNxVFjAQA,30597 pycodestyle-2.7.0.dist-info/RECORD,, pycodestyle-2.7.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 pycodestyle-2.7.0.dist-info/entry_points.txt,sha256=6JU_7SAppC93MBSQi1_QxDwEQUyg6cgK71ab9q_Hxco,51 pycodestyle-2.7.0.dist-info/namespace_packages.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 pycodestyle-2.7.0.dist-info/top_level.txt,sha256=rHbIEiXmvsJ016mFcLVcF_d-dKgP3VdfOB6CWbivZug,12 pycodestyle.py,sha256=hnconAdzFCx6bjc3KWR66Aa5HCHWfW9TeloG2r5aRx8,104395
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/LICENSE
Copyright © 2006-2009 Johann C. Rocholl <johann@rocholl.net> Copyright © 2009-2014 Florent Xicluna <florent.xicluna@gmail.com> Copyright © 2014-2020 Ian Lee <IanLee1521@gmail.com> Licensed under the terms of the Expat License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/namespace_packages.txt
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/WHEEL
Wheel-Version: 1.0 Generator: bdist_wheel (0.36.2) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/entry_points.txt
[console_scripts] pycodestyle = pycodestyle:_main
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/top_level.txt
pycodestyle
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pycodestyle-2.7.0.dist-info/METADATA
Metadata-Version: 2.1 Name: pycodestyle Version: 2.7.0 Summary: Python style guide checker Home-page: https://pycodestyle.pycqa.org/ Author: Johann C. Rocholl Author-email: johann@rocholl.net Maintainer: Ian Lee Maintainer-email: IanLee1521@gmail.com License: Expat license Project-URL: Changes, https://pycodestyle.pycqa.org/en/latest/developer.html#changes Keywords: pycodestyle,pep8,PEP 8,PEP-8,PEP8 Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: Libraries :: Python Modules Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* pycodestyle (formerly called pep8) - Python style guide checker =============================================================== .. image:: https://img.shields.io/travis/PyCQA/pycodestyle.svg :target: https://travis-ci.org/PyCQA/pycodestyle :alt: Build status .. image:: https://readthedocs.org/projects/pycodestyle/badge/?version=latest :target: https://pycodestyle.pycqa.org :alt: Documentation Status .. image:: https://img.shields.io/pypi/wheel/pycodestyle.svg :target: https://pypi.org/project/pycodestyle/ :alt: Wheel Status .. image:: https://badges.gitter.im/PyCQA/pycodestyle.svg :alt: Join the chat at https://gitter.im/PyCQA/pycodestyle :target: https://gitter.im/PyCQA/pycodestyle?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge pycodestyle is a tool to check your Python code against some of the style conventions in `PEP 8`_. .. _PEP 8: http://www.python.org/dev/peps/pep-0008/ .. note:: This package used to be called ``pep8`` but was renamed to ``pycodestyle`` to reduce confusion. Further discussion can be found `in the issue where Guido requested this change <https://github.com/PyCQA/pycodestyle/issues/466>`_, or in the lightning talk at PyCon 2016 by @IanLee1521: `slides <https://speakerdeck.com/ianlee1521/pep8-vs-pep-8>`_ `video <https://youtu.be/PulzIT8KYLk?t=36m>`_. Features -------- * Plugin architecture: Adding new checks is easy. * Parseable output: Jump to error location in your editor. * Small: Just one Python file, requires only stdlib. You can use just the ``pycodestyle.py`` file for this purpose. * Comes with a comprehensive test suite. Installation ------------ You can install, upgrade, and uninstall ``pycodestyle.py`` with these commands:: $ pip install pycodestyle $ pip install --upgrade pycodestyle $ pip uninstall pycodestyle There's also a package for Debian/Ubuntu, but it's not always the latest version. 
Example usage and output
------------------------

::

    $ pycodestyle --first optparse.py
    optparse.py:69:11: E401 multiple imports on one line
    optparse.py:77:1: E302 expected 2 blank lines, found 1
    optparse.py:88:5: E301 expected 1 blank line, found 0
    optparse.py:222:34: W602 deprecated form of raising exception
    optparse.py:347:31: E211 whitespace before '('
    optparse.py:357:17: E201 whitespace after '{'
    optparse.py:472:29: E221 multiple spaces before operator
    optparse.py:544:21: W601 .has_key() is deprecated, use 'in'

You can also make ``pycodestyle.py`` show the source code for each error, and
even the relevant text from PEP 8::

    $ pycodestyle --show-source --show-pep8 testsuite/E40.py
    testsuite/E40.py:2:10: E401 multiple imports on one line
    import os, sys
             ^
        Imports should usually be on separate lines.

        Okay: import os\nimport sys
        E401: import sys, os

Or you can display how often each error was found::

    $ pycodestyle --statistics -qq Python-2.5/Lib
    232     E201 whitespace after '['
    599     E202 whitespace before ')'
    631     E203 whitespace before ','
    842     E211 whitespace before '('
    2531    E221 multiple spaces before operator
    4473    E301 expected 1 blank line, found 0
    4006    E302 expected 2 blank lines, found 1
    165     E303 too many blank lines (4)
    325     E401 multiple imports on one line
    3615    E501 line too long (82 characters)
    612     W601 .has_key() is deprecated, use 'in'
    1188    W602 deprecated form of raising exception

Links
-----

* `Read the documentation <https://pycodestyle.pycqa.org/>`_
* `Fork me on GitHub <http://github.com/PyCQA/pycodestyle>`_

Changelog
=========

2.7.0 (2021-03-14)
------------------

Changes:

* Fix physical checks (such as W191) at end of file. PR #961.
* Add ``--indent-size`` option (defaulting to ``4``). PR #970.
* W605: fix escaped crlf false positive on windows. PR #976.

2.6.0 (2020-05-11)
------------------

Announcements:

* Anthony Sottile (@asottile) joined the team as a core developer. :tada:

Changes:

* E306: fix detection inside ``async def``. PR #929.
* E301: fix regression disallowing decorated one-liners. PR #927.
* E714: fix false positive with chained ``is not``. PR #931.

2.6.0a1 (2020-04-23)
--------------------

New checks:

* E225: require whitespace around ``and`` ``in`` ``is`` and ``or``. PR #847.

Changes:

* E117: fix indentation using tabs by treating as 8-space indents. PR #837.
* E721: fix false positive with names containing ``istype``. PR #850.
* E741: allow ``l`` as a named argument in a function call. PR #853.
* E302: fix false-negative with decorated functions. PR #859.
* W504: ellipsis (``...``) is no longer treated as a binary operator. PR #875.
* E402: allow ``with``, ``if``, ``elif``, ``else`` to guard imports. PR #834.
* Add support for assignment expressions ``:=`` (PEP 572). PR #879.
* Add support for positional-only arguments ``/`` (PEP 570). PR #872, #918.
* Add support for python 3.8.
* Add support for matrix multiplication operator ``@`` (PEP 465). PR #897.
* Support visual indent for continuation lines for ``with`` / ``assert`` /
  ``raise``. PR #912.
* E302: allow two blank lines after a block of one-liners. PR #913.
* E302: allow two-and-fewer newlines at the top of the file. PR #919.

2.5.0 (2019-01-29)
------------------

New checks:

* E117: Over-indented code blocks
* W505: Maximum doc-string length only when configured with --max-doc-length

Changes:

* Remove support for EOL Python 2.6 and 3.3. PR #720.
* Add E117 error for over-indented code blocks.
* Allow W605 to be silenced by `# noqa` and fix the position reported by W605
* Allow users to omit blank lines around one-liner definitions of classes and
  functions
* Include the function return annotation (``->``) as requiring surrounding
  whitespace only on Python 3
* Verify that only names can follow ``await``. Previously we allowed numbers
  and strings.
* Add support for Python 3.7
* Fix detection of annotated argument defaults for E252
* Correct the position reported by W504

2.4.0 (2018-04-10)
------------------

New checks:

* Add W504 warning for checking that a line break doesn't happen after a
  binary operator. This check is ignored by default. PR #502.
* Add W605 warning for invalid escape sequences in string literals. PR #676.
* Add W606 warning for 'async' and 'await' reserved keywords being introduced
  in Python 3.7. PR #684.
* Add E252 error for missing whitespace around equal sign in type annotated
  function arguments with default values. PR #717.

Changes:

* An internal bisect search has replaced a linear search in order to improve
  efficiency. PR #648.
* pycodestyle now uses PyPI trove classifiers in order to document supported
  python versions on PyPI. PR #654.
* 'setup.cfg' '[wheel]' section has been renamed to '[bdist_wheel]', as the
  former is legacy. PR #653.
* pycodestyle now handles very long lines much more efficiently for python
  3.2+. Fixes #643. PR #644.
* You can now write 'pycodestyle.StyleGuide(verbose=True)' instead of
  'pycodestyle.StyleGuide(verbose=True, paths=['-v'])' in order to achieve
  verbosity. PR #663.
* The distribution of pycodestyle now includes the license text in order to
  comply with open source licenses which require this. PR #694.
* 'maximum_line_length' now ignores shebang ('#!') lines. PR #736.
* Add configuration option for the allowed number of blank lines. It is
  implemented as a top level dictionary which can be easily overwritten. Fixes
  #732. PR #733.

Bugs:

* Prevent a 'DeprecationWarning', and a 'SyntaxError' in future python, caused
  by an invalid escape sequence. PR #625.
* Correctly report E501 when the first line of a docstring is too long.
  Resolves #622. PR #630.
* Support variable annotations when a variable starts with a keyword, such as
  class variable type annotations in python 3.6. PR #640.
* pycodestyle internals have been changed in order to allow 'python3 -m
  cProfile' to report correct metrics. PR #647.
* Fix a spelling mistake in the description of E722. PR #697.
* 'pycodestyle --diff' now does not break if your 'gitconfig' enables
  'mnemonicprefix'. PR #706.

2.3.1 (2017-01-31)
------------------

Bugs:

* Fix regression in detection of E302 and E306; #618, #620

2.3.0 (2017-01-30)
------------------

New Checks:

* Add E722 warning for bare ``except`` clauses
* Report E704 for async function definitions (``async def``)

Bugs:

* Fix another E305 false positive for variables beginning with "class" or
  "def"
* Fix detection of multiple spaces between ``async`` and ``def``
* Fix handling of variable annotations. Stop reporting E701 on Python 3.6 for
  variable annotations.
2.2.0 (2016-11-14) ------------------ Announcements: * Added Make target to obtain proper tarball file permissions; #599 Bugs: * Fixed E305 regression caused by #400; #593 2.1.0 (2016-11-04) ------------------ Announcements: * Change all references to the pep8 project to say pycodestyle; #530 Changes: * Report E302 for blank lines before an "async def"; #556 * Update our list of tested and supported Python versions which are 2.6, 2.7, 3.2, 3.3, 3.4 and 3.5 as well as the nightly Python build and PyPy. * Report E742 and E743 for functions and classes badly named 'l', 'O', or 'I'. * Report E741 on 'global' and 'nonlocal' statements, as well as prohibited single-letter variables. * Deprecated use of `[pep8]` section name in favor of `[pycodestyle]`; #591 * Report E722 when bare except clause is used; #579 Bugs: * Fix opt_type AssertionError when using Flake8 2.6.2 and pycodestyle; #561 * Require two blank lines after toplevel def, class; #536 * Remove accidentally quadratic computation based on the number of colons. This will make pycodestyle faster in some cases; #314 2.0.0 (2016-05-31) ------------------ Announcements: * Repository renamed to `pycodestyle`; Issue #466 / #481. * Added joint Code of Conduct as member of PyCQA; #483 Changes: * Added tox test support for Python 3.5 and pypy3 * Added check E275 for whitespace on `from ... import ...` lines; #489 / #491 * Added W503 to the list of codes ignored by default ignore list; #498 * Removed use of project level `.pep8` configuration file; #364 Bugs: * Fixed bug with treating `~` operator as binary; #383 / #384 * Identify binary operators as unary; #484 / #485 1.7.0 (2016-01-12) ------------------ Announcements: * Repository moved to PyCQA Organization on GitHub: https://github.com/pycqa/pep8 Changes: * Reverted the fix in #368, "options passed on command line are only ones accepted" feature. This has many unintended consequences in pep8 and flake8 and needs to be reworked when I have more time. * Added support for Python 3.5. (Issue #420 & #459) * Added support for multi-line config_file option parsing. (Issue #429) * Improved parameter parsing. (Issues #420 & #456) Bugs: * Fixed BytesWarning on Python 3. (Issue #459) 1.6.2 (2015-02-15) ------------------ Changes: * Added check for breaking around a binary operator. (Issue #197, Pull #305) Bugs: * Restored config_file parameter in process_options(). (Issue #380) 1.6.1 (2015-02-08) ------------------ Changes: * Assign variables before referenced. (Issue #287) Bugs: * Exception thrown due to unassigned ``local_dir`` variable. (Issue #377) 1.6.0 (2015-02-06) ------------------ News: * Ian Lee <ianlee1521@gmail.com> joined the project as a maintainer. Changes: * Report E731 for lambda assignment. (Issue #277) * Report E704 for one-liner def instead of E701. Do not report this error in the default configuration. (Issue #277) * Replace codes E111, E112 and E113 with codes E114, E115 and E116 for bad indentation of comments. (Issue #274) * Report E266 instead of E265 when the block comment starts with multiple ``#``. (Issue #270) * Report E402 for import statements not at the top of the file. (Issue #264) * Do not enforce whitespaces around ``**`` operator. (Issue #292) * Strip whitespace from around paths during normalization. (Issue #339 / #343) * Update ``--format`` documentation. (Issue #198 / Pull Request #310) * Add ``.tox/`` to default excludes. (Issue #335) * Do not report E121 or E126 in the default configuration. 
  (Issues #256 / #316)
* Allow spaces around the equals sign in an annotated function. (Issue #357)
* Allow trailing backslash if in an inline comment. (Issue #374)
* If ``--config`` is used, only that configuration is processed. Otherwise,
  the user and local configurations are merged. (Issue #368 / #369)

Bug fixes:

* Don't crash if Checker.build_tokens_line() returns None. (Issue #306)
* Don't crash if os.path.expanduser() throws an ImportError. (Issue #297)
* Missing space around keyword parameter equal not always reported, E251.
  (Issue #323)
* Fix false positive E711/E712/E713. (Issues #330 and #336)
* Do not skip physical checks if the newline is escaped. (Issue #319)
* Flush sys.stdout to avoid race conditions with printing. See flake8 bug:
  https://gitlab.com/pycqa/flake8/issues/17 for more details. (Issue #363)

1.5.7 (2014-05-29)
------------------

Bug fixes:

* Skip the traceback on "Broken pipe" signal. (Issue #275)
* Do not exit when an option in ``setup.cfg`` or ``tox.ini`` is not
  recognized.
* Check the last line even if it does not end with a newline. (Issue #286)
* Always open files in universal newlines mode in Python 2. (Issue #288)

1.5.6 (2014-04-14)
------------------

Bug fixes:

* Check the last line even if it has no end-of-line. (Issue #273)

1.5.5 (2014-04-10)
------------------

Bug fixes:

* Fix regression with E22 checks and inline comments. (Issue #271)

1.5.4 (2014-04-07)
------------------

Bug fixes:

* Fix negative offset with E303 before a multi-line docstring. (Issue #269)

1.5.3 (2014-04-04)
------------------

Bug fixes:

* Fix wrong offset computation when error is on the last char of a physical
  line. (Issue #268)

1.5.2 (2014-04-04)
------------------

Changes:

* Distribute a universal wheel file.

Bug fixes:

* Report correct line number for E303 with comments. (Issue #60)
* Do not allow newline after parameter equal. (Issue #252)
* Fix line number reported for multi-line strings. (Issue #220)
* Fix false positive E121/E126 with multi-line strings. (Issue #265)
* Fix E501 not detected in comments with Python 2.5.
* Fix caret position with ``--show-source`` when line contains tabs.

1.5.1 (2014-03-27)
------------------

Bug fixes:

* Fix a crash with E125 on multi-line strings. (Issue #263)

1.5 (2014-03-26)
----------------

Changes:

* Report E129 instead of E125 for visually indented line with same indent as
  next logical line. (Issue #126)
* Report E265 for space before block comment. (Issue #190)
* Report E713 and E714 when operators ``not in`` and ``is not`` are
  recommended. (Issue #236)
* Allow long lines in multiline strings and comments if they cannot be
  wrapped. (Issue #224).
* Optionally disable physical line checks inside multiline strings, using
  ``# noqa``. (Issue #242)
* Change text for E121 to report "continuation line under-indented for
  hanging indent" instead of indentation not being a multiple of 4.
* Report E131 instead of E121 / E126 if the hanging indent is not consistent
  within the same continuation block. It helps when error E121 or E126 is in
  the ``ignore`` list.
* Report E126 instead of E121 when the continuation line is hanging with
  extra indentation, even if indentation is not a multiple of 4.

Bug fixes:

* Allow the checkers to report errors on empty files. (Issue #240)
* Fix ignoring too many checks when ``--select`` is used with codes declared
  in a flake8 extension. (Issue #216)
* Fix regression with multiple brackets. (Issue #214)
* Fix ``StyleGuide`` to parse the local configuration if the keyword argument
  ``paths`` is specified.
(Issue #246) * Fix a false positive E124 for hanging indent. (Issue #254) * Fix a false positive E126 with embedded colon. (Issue #144) * Fix a false positive E126 when indenting with tabs. (Issue #204) * Fix behaviour when ``exclude`` is in the configuration file and the current directory is not the project directory. (Issue #247) * The logical checks can return ``None`` instead of an empty iterator. (Issue #250) * Do not report multiple E101 if only the first indentation starts with a tab. (Issue #237) * Fix a rare false positive W602. (Issue #34) 1.4.6 (2013-07-02) ------------------ Changes: * Honor ``# noqa`` for errors E711 and E712. (Issue #180) * When both a ``tox.ini`` and a ``setup.cfg`` are present in the project directory, merge their contents. The ``tox.ini`` file takes precedence (same as before). (Issue #182) * Give priority to ``--select`` over ``--ignore``. (Issue #188) * Compare full path when excluding a file. (Issue #186) * New option ``--hang-closing`` to switch to the alternative style of closing bracket indentation for hanging indent. Add error E133 for closing bracket which is missing indentation. (Issue #103) * Accept both styles of closing bracket indentation for hanging indent. Do not report error E123 in the default configuration. (Issue #103) Bug fixes: * Do not crash when running AST checks and the document contains null bytes. (Issue #184) * Correctly report other E12 errors when E123 is ignored. (Issue #103) * Fix false positive E261/E262 when the file contains a BOM. (Issue #193) * Fix E701, E702 and E703 not detected sometimes. (Issue #196) * Fix E122 not detected in some cases. (Issue #201 and #208) * Fix false positive E121 with multiple brackets. (Issue #203) 1.4.5 (2013-03-06) ------------------ * When no path is specified, do not try to read from stdin. The feature was added in 1.4.3, but it is not supported on Windows. Use ``-`` filename argument to read from stdin. This usage is supported since 1.3.4. (Issue #170) * Do not require ``setuptools`` in setup.py. It works around an issue with ``pip`` and Python 3. (Issue #172) * Add ``__pycache__`` to the ignore list. * Change misleading message for E251. (Issue #171) * Do not report false E302 when the source file has a coding cookie or a comment on the first line. (Issue #174) * Reorganize the tests and add tests for the API and for the command line usage and options. (Issues #161 and #162) * Ignore all checks which are not explicitly selected when ``select`` is passed to the ``StyleGuide`` constructor. 1.4.4 (2013-02-24) ------------------ * Report E227 or E228 instead of E225 for whitespace around bitwise, shift or modulo operators. (Issue #166) * Change the message for E226 to make clear that it is about arithmetic operators. * Fix a false positive E128 for continuation line indentation with tabs. * Fix regression with the ``--diff`` option. (Issue #169) * Fix the ``TestReport`` class to print the unexpected warnings and errors. 1.4.3 (2013-02-22) ------------------ * Hide the ``--doctest`` and ``--testsuite`` options when installed. * Fix crash with AST checkers when the syntax is invalid. (Issue #160) * Read from standard input if no path is specified. * Initiate a graceful shutdown on ``Control+C``. * Allow changing the ``checker_class`` for the ``StyleGuide``. 1.4.2 (2013-02-10) ------------------ * Support AST checkers provided by third-party applications. * Register new checkers with ``register_check(func_or_cls, codes)``. * Allow constructing a ``StyleGuide`` with a custom parser. 
* Accept visual indentation without parenthesis after the ``if`` statement. (Issue #151) * Fix UnboundLocalError when using ``# noqa`` with continued lines. (Issue #158) * Re-order the lines for the ``StandardReport``. * Expand tabs when checking E12 continuation lines. (Issue #155) * Refactor the testing class ``TestReport`` and the specific test functions into a separate test module. 1.4.1 (2013-01-18) ------------------ * Allow sphinx.ext.autodoc syntax for comments. (Issue #110) * Report E703 instead of E702 for the trailing semicolon. (Issue #117) * Honor ``# noqa`` in addition to ``# nopep8``. (Issue #149) * Expose the ``OptionParser`` factory for better extensibility. 1.4 (2012-12-22) ---------------- * Report E226 instead of E225 for optional whitespace around common operators (``*``, ``**``, ``/``, ``+`` and ``-``). This new error code is ignored in the default configuration because PEP 8 recommends to "use your own judgement". (Issue #96) * Lines with a ``# nopep8`` at the end will not issue errors on line length E501 or continuation line indentation E12*. (Issue #27) * Fix AssertionError when the source file contains an invalid line ending ``"\r\r\n"``. (Issue #119) * Read the ``[pep8]`` section of ``tox.ini`` or ``setup.cfg`` if present. (Issue #93 and #141) * Add the Sphinx-based documentation, and publish it on https://pycodestyle.readthedocs.io/. (Issue #105) 1.3.4 (2012-12-18) ------------------ * Fix false positive E124 and E128 with comments. (Issue #100) * Fix error on stdin when running with bpython. (Issue #101) * Fix false positive E401. (Issue #104) * Report E231 for nested dictionary in list. (Issue #142) * Catch E271 at the beginning of the line. (Issue #133) * Fix false positive E126 for multi-line comments. (Issue #138) * Fix false positive E221 when operator is preceded by a comma. (Issue #135) * Fix ``--diff`` failing on one-line hunk. (Issue #137) * Fix the ``--exclude`` switch for directory paths. (Issue #111) * Use ``-`` filename to read from standard input. (Issue #128) 1.3.3 (2012-06-27) ------------------ * Fix regression with continuation line checker. (Issue #98) 1.3.2 (2012-06-26) ------------------ * Revert to the previous behaviour for ``--show-pep8``: do not imply ``--first``. (Issue #89) * Add E902 for IO errors. (Issue #87) * Fix false positive for E121, and missed E124. (Issue #92) * Set a sensible default path for config file on Windows. (Issue #95) * Allow ``verbose`` in the configuration file. (Issue #91) * Show the enforced ``max-line-length`` in the error message. (Issue #86) 1.3.1 (2012-06-18) ------------------ * Explain which configuration options are expected. Accept and recommend the options names with hyphen instead of underscore. (Issue #82) * Do not read the user configuration when used as a module (except if ``config_file=True`` is passed to the ``StyleGuide`` constructor). * Fix wrong or missing cases for the E12 series. * Fix cases where E122 was missed. (Issue #81) 1.3 (2012-06-15) ---------------- .. warning:: The internal API is backwards incompatible. * Remove global configuration and refactor the library around a ``StyleGuide`` class; add the ability to configure various reporters. (Issue #35 and #66) * Read user configuration from ``~/.config/pep8`` and local configuration from ``./.pep8``. (Issue #22) * Fix E502 for backslash embedded in multi-line string. (Issue #68) * Fix E225 for Python 3 iterable unpacking (PEP 3132). (Issue #72) * Enable the new checkers from the E12 series in the default configuration. 
* Suggest less error-prone alternatives for E712 errors. * Rewrite checkers to run faster (E22, E251, E27). * Fixed a crash when parsed code is invalid (too many closing brackets). * Fix E127 and E128 for continuation line indentation. (Issue #74) * New option ``--format`` to customize the error format. (Issue #23) * New option ``--diff`` to check only modified code. The unified diff is read from STDIN. Example: ``hg diff | pep8 --diff`` (Issue #39) * Correctly report the count of failures and set the exit code to 1 when the ``--doctest`` or the ``--testsuite`` fails. * Correctly detect the encoding in Python 3. (Issue #69) * Drop support for Python 2.3, 2.4 and 3.0. (Issue #78) 1.2 (2012-06-01) ---------------- * Add E121 through E128 for continuation line indentation. These checks are disabled by default. If you want to force all checks, use switch ``--select=E,W``. Patch by Sam Vilain. (Issue #64) * Add E721 for direct type comparisons. (Issue #47) * Add E711 and E712 for comparisons to singletons. (Issue #46) * Fix spurious E225 and E701 for function annotations. (Issue #29) * Add E502 for explicit line join between brackets. * Fix E901 when printing source with ``--show-source``. * Report all errors for each checker, instead of reporting only the first occurrence for each line. * Option ``--show-pep8`` implies ``--first``. 1.1 (2012-05-24) ---------------- * Add E901 for syntax errors. (Issues #63 and #30) * Add E271, E272, E273 and E274 for extraneous whitespace around keywords. (Issue #57) * Add ``tox.ini`` configuration file for tests. (Issue #61) * Add ``.travis.yml`` configuration file for continuous integration. (Issue #62) 1.0.1 (2012-04-06) ------------------ * Fix inconsistent version numbers. 1.0 (2012-04-04) ---------------- * Fix W602 ``raise`` to handle multi-char names. (Issue #53) 0.7.0 (2012-03-26) ------------------ * Now ``--first`` prints only the first occurrence of each error. The ``--repeat`` flag becomes obsolete because it is the default behaviour. (Issue #6) * Allow specifying ``--max-line-length``. (Issue #36) * Make the shebang more flexible. (Issue #26) * Add testsuite to the bundle. (Issue #25) * Fixes for Jython. (Issue #49) * Add PyPI classifiers. (Issue #43) * Fix the ``--exclude`` option. (Issue #48) * Fix W602, accept ``raise`` with 3 arguments. (Issue #34) * Correctly select all tests if ``DEFAULT_IGNORE == ''``. 0.6.1 (2010-10-03) ------------------ * Fix inconsistent version numbers. (Issue #21) 0.6.0 (2010-09-19) ------------------ * Test suite reorganized and enhanced in order to check more failures with fewer test files. Read the ``run_tests`` docstring for details about the syntax. * Fix E225: accept ``print >>sys.stderr, "..."`` syntax. * Fix E501 for lines containing multibyte encoded characters. (Issue #7) * Fix E221, E222, E223, E224 not detected in some cases. (Issue #16) * Fix E211 to reject ``v = dic['a'] ['b']``. (Issue #17) * Exit code is always 1 if any error or warning is found. (Issue #10) * ``--ignore`` checks are now really ignored, especially in conjunction with ``--count``. (Issue #8) * Blank lines with spaces yield W293 instead of W291: some developers want to ignore this warning and indent the blank lines to paste their code easily in the Python interpreter. * Fix E301: do not require a blank line before an indented block. (Issue #14) * Fix E203 to accept NumPy slice notation ``a[0, :]``. (Issue #13) * Performance improvements. * Fix decoding and checking non-UTF8 files in Python 3. 
* Fix E225: reject ``True+False`` when running on Python 3. * Fix an exception when the line starts with an operator. * Allow a new line before closing ``)``, ``}`` or ``]``. (Issue #5) 0.5.0 (2010-02-17) ------------------ * Changed the ``--count`` switch to print to sys.stderr and set exit code to 1 if any error or warning is found. * E241 and E242 are removed from the standard checks. If you want to include these checks, use switch ``--select=E,W``. (Issue #4) * Blank line is not mandatory before the first class method or nested function definition, even if there's a docstring. (Issue #1) * Add the switch ``--version``. * Fix decoding errors with Python 3. (Issue #13 [1]_) * Add ``--select`` option which is mirror of ``--ignore``. * Add checks E261 and E262 for spaces before inline comments. * New check W604 warns about deprecated usage of backticks. * New check W603 warns about the deprecated operator ``<>``. * Performance improvement, due to rewriting of E225. * E225 now accepts: - no whitespace after unary operator or similar. (Issue #9 [1]_) - lambda function with argument unpacking or keyword defaults. * Reserve "2 blank lines" for module-level logical blocks. (E303) * Allow multi-line comments. (E302, issue #10 [1]_) 0.4.2 (2009-10-22) ------------------ * Decorators on classes and class methods are OK now. 0.4 (2009-10-20) ---------------- * Support for all versions of Python from 2.3 to 3.1. * New and greatly expanded self tests. * Added ``--count`` option to print the total number of errors and warnings. * Further improvements to the handling of comments and blank lines. (Issue #1 [1]_ and others changes.) * Check all py files in directory when passed a directory (Issue #2 [1]_). This also prevents an exception when traversing directories with non ``*.py`` files. * E231 should allow commas to be followed by ``)``. (Issue #3 [1]_) * Spaces are no longer required around the equals sign for keyword arguments or default parameter values. .. [1] These issues refer to the `previous issue tracker`__. .. __: http://github.com/cburroughs/pep8.py/issues 0.3.1 (2009-09-14) ------------------ * Fixes for comments: do not count them when checking for blank lines between items. * Added setup.py for pypi upload and easy_installability. 0.2 (2007-10-16) ---------------- * Loads of fixes and improvements. 0.1 (2006-10-01) ---------------- * First release.
0
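Beyond the command-line runs shown in the metadata above, the changelog repeatedly references a programmatic ``StyleGuide`` API; a minimal hedged sketch (the file name ``example.py`` is hypothetical):

# Sketch: run pycodestyle from Python instead of the CLI.
import pycodestyle

# Options mirror the CLI flags; here long-line warnings are ignored.
style = pycodestyle.StyleGuide(ignore=["E501"])

# check_files prints each violation and returns a report object.
report = style.check_files(["example.py"])
print("violations found:", report.total_errors)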
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 MarkupSafe-1.1.1.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503 MarkupSafe-1.1.1.dist-info/METADATA,sha256=-XXnVvCxQP2QbHutIQq_7Pk9OATy-x0NC7gN_3_SCRE,3167 MarkupSafe-1.1.1.dist-info/RECORD,, MarkupSafe-1.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 MarkupSafe-1.1.1.dist-info/WHEEL,sha256=jr7ubY0Lkz_yXH9FfFe9PTtLhGOsf62dZkNvTYrJINE,100 MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 markupsafe/__init__.py,sha256=UAy1UKlykemnSZWIVn8RDqY0wvjV6lkeRwYOMNhw4bA,10453 markupsafe/__pycache__/__init__.cpython-39.pyc,, markupsafe/__pycache__/_compat.cpython-39.pyc,, markupsafe/__pycache__/_constants.cpython-39.pyc,, markupsafe/__pycache__/_native.cpython-39.pyc,, markupsafe/_compat.py,sha256=XweNhJEcyTP_wIBUaIO6nxzIb6XFwweriXyZfiTpkdw,591 markupsafe/_constants.py,sha256=IXLUQkLM6CTustG5vEQTEy6pBB3z5pm84NkYU1aW9qI,4954 markupsafe/_native.py,sha256=LwsYk-GHoPsPboRD_tNC6_jTmCj3MLtsnDFis7HjE50,1942 markupsafe/_speedups.cp39-win_amd64.pyd,sha256=-q4bNNv41CqQoNIaElFJdVt3KWe4OYhccM6G2bJ_SMo,15360
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL
Wheel-Version: 1.0 Generator: bdist_wheel (0.36.2) Root-Is-Purelib: false Tag: cp39-cp39-win_amd64
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst
Copyright 2010 Pallets Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
markupsafe
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER
pip
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/MarkupSafe-1.1.1.dist-info/METADATA
Metadata-Version: 2.1
Name: MarkupSafe
Version: 1.1.1
Summary: Safely add untrusted strings to HTML/XML markup.
Home-page: https://palletsprojects.com/p/markupsafe/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: The Pallets Team
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/markupsafe
Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
Description-Content-Type: text/x-rst

MarkupSafe
==========

MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.

Installing
----------

Install and update using `pip`_:

.. code-block:: text

    pip install -U MarkupSafe

.. _pip: https://pip.pypa.io/en/stable/quickstart/

Examples
--------

.. code-block:: pycon

    >>> from markupsafe import Markup, escape
    >>> # escape replaces special characters and wraps in Markup
    >>> escape('<script>alert(document.cookie);</script>')
    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
    >>> # wrap in Markup to mark text "safe" and prevent escaping
    >>> Markup('<strong>Hello</strong>')
    Markup('<strong>Hello</strong>')
    >>> escape(Markup('<strong>Hello</strong>'))
    Markup('<strong>Hello</strong>')
    >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
    >>> # methods and operators escape their arguments
    >>> template = Markup("Hello <em>%s</em>")
    >>> template % '"World"'
    Markup('Hello <em>&#34;World&#34;</em>')

Donate
------

The Pallets organization develops and supports MarkupSafe and other
libraries that use it. In order to grow the community of contributors
and users, and allow the maintainers to devote more time to the
projects, `please donate today`_.

.. _please donate today: https://palletsprojects.com/donate

Links
-----

* Website: https://palletsprojects.com/p/markupsafe/
* Documentation: https://markupsafe.palletsprojects.com/
* Releases: https://pypi.org/project/MarkupSafe/
* Code: https://github.com/pallets/markupsafe
* Issue tracker: https://github.com/pallets/markupsafe/issues
* Test status: https://dev.azure.com/pallets/markupsafe/_build
* Official chat: https://discord.gg/t6rrQZH
0
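One more hedged illustration of the behaviour the metadata describes ("methods and operators escape their arguments"): ``format`` on a ``Markup`` template escapes untrusted arguments while leaving the template's own markup intact.

# Sketch: Markup.format escapes its arguments automatically.
from markupsafe import Markup

template = Markup("Hello <em>{}</em>")
print(template.format("<b>World</b>"))
# Hello <em>&lt;b&gt;World&lt;/b&gt;</em>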
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/_reloader.py
import os import subprocess import sys import threading import time from itertools import chain from ._compat import iteritems from ._compat import PY2 from ._compat import text_type from ._internal import _log def _iter_module_files(): """This iterates over all relevant Python files. It goes through all loaded files from modules, all files in folders of already loaded modules as well as all files reachable through a package. """ # The list call is necessary on Python 3 in case the module # dictionary modifies during iteration. for module in list(sys.modules.values()): if module is None: continue filename = getattr(module, "__file__", None) if filename: if os.path.isdir(filename) and os.path.exists( os.path.join(filename, "__init__.py") ): filename = os.path.join(filename, "__init__.py") old = None while not os.path.isfile(filename): old = filename filename = os.path.dirname(filename) if filename == old: break else: if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] yield filename def _find_observable_paths(extra_files=None): """Finds all paths that should be observed.""" rv = set( os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x) for x in sys.path ) for filename in extra_files or (): rv.add(os.path.dirname(os.path.abspath(filename))) for module in list(sys.modules.values()): fn = getattr(module, "__file__", None) if fn is None: continue fn = os.path.abspath(fn) rv.add(os.path.dirname(fn)) return _find_common_roots(rv) def _get_args_for_reloading(): """Determine how the script was executed, and return the args needed to execute it again in a new process. """ rv = [sys.executable] py_script = sys.argv[0] args = sys.argv[1:] # Need to look at main module to determine how it was executed. __main__ = sys.modules["__main__"] # The value of __package__ indicates how Python was called. It may # not exist if a setuptools script is installed as an egg. It may be # set incorrectly for entry points created with pip on Windows. if getattr(__main__, "__package__", None) is None or ( os.name == "nt" and __main__.__package__ == "" and not os.path.exists(py_script) and os.path.exists(py_script + ".exe") ): # Executed a file, like "python app.py". py_script = os.path.abspath(py_script) if os.name == "nt": # Windows entry points have ".exe" extension and should be # called directly. if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"): py_script += ".exe" if ( os.path.splitext(sys.executable)[1] == ".exe" and os.path.splitext(py_script)[1] == ".exe" ): rv.pop(0) rv.append(py_script) else: # Executed a module, like "python -m werkzeug.serving". if sys.argv[0] == "-m": # Flask works around previous behavior by putting # "-m flask" in sys.argv. # TODO remove this once Flask no longer misbehaves args = sys.argv else: if os.path.isfile(py_script): # Rewritten by Python from "-m script" to "/path/to/script.py". py_module = __main__.__package__ name = os.path.splitext(os.path.basename(py_script))[0] if name != "__main__": py_module += "." + name else: # Incorrectly rewritten by pydevd debugger from "-m script" to "script". 
py_module = py_script rv.extend(("-m", py_module.lstrip("."))) rv.extend(args) return rv def _find_common_roots(paths): """Out of some paths it finds the common roots that need monitoring.""" paths = [x.split(os.path.sep) for x in paths] root = {} for chunks in sorted(paths, key=len, reverse=True): node = root for chunk in chunks: node = node.setdefault(chunk, {}) node.clear() rv = set() def _walk(node, path): for prefix, child in iteritems(node): _walk(child, path + (prefix,)) if not node: rv.add("/".join(path)) _walk(root, ()) return rv class ReloaderLoop(object): name = None # monkeypatched by testsuite. wrapping with `staticmethod` is required in # case time.sleep has been replaced by a non-c function (e.g. by # `eventlet.monkey_patch`) before we get here _sleep = staticmethod(time.sleep) def __init__(self, extra_files=None, interval=1): self.extra_files = set(os.path.abspath(x) for x in extra_files or ()) self.interval = interval def run(self): pass def restart_with_reloader(self): """Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread. """ while 1: _log("info", " * Restarting with %s" % self.name) args = _get_args_for_reloading() # a weird bug on windows. sometimes unicode strings end up in the # environment and subprocess.call does not like this, encode them # to latin1 and continue. if os.name == "nt" and PY2: new_environ = {} for key, value in iteritems(os.environ): if isinstance(key, text_type): key = key.encode("iso-8859-1") if isinstance(value, text_type): value = value.encode("iso-8859-1") new_environ[key] = value else: new_environ = os.environ.copy() new_environ["WERKZEUG_RUN_MAIN"] = "true" exit_code = subprocess.call(args, env=new_environ, close_fds=False) if exit_code != 3: return exit_code def trigger_reload(self, filename): self.log_reload(filename) sys.exit(3) def log_reload(self, filename): filename = os.path.abspath(filename) _log("info", " * Detected change in %r, reloading" % filename) class StatReloaderLoop(ReloaderLoop): name = "stat" def run(self): mtimes = {} while 1: for filename in chain(_iter_module_files(), self.extra_files): try: mtime = os.stat(filename).st_mtime except OSError: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: self.trigger_reload(filename) self._sleep(self.interval) class WatchdogReloaderLoop(ReloaderLoop): def __init__(self, *args, **kwargs): ReloaderLoop.__init__(self, *args, **kwargs) from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler self.observable_paths = set() def _check_modification(filename): if filename in self.extra_files: self.trigger_reload(filename) dirname = os.path.dirname(filename) if dirname.startswith(tuple(self.observable_paths)): if filename.endswith((".pyc", ".pyo", ".py")): self.trigger_reload(filename) class _CustomHandler(FileSystemEventHandler): def on_created(self, event): _check_modification(event.src_path) def on_modified(self, event): _check_modification(event.src_path) def on_moved(self, event): _check_modification(event.src_path) _check_modification(event.dest_path) def on_deleted(self, event): _check_modification(event.src_path) reloader_name = Observer.__name__.lower() if reloader_name.endswith("observer"): reloader_name = reloader_name[:-8] reloader_name += " reloader" self.name = reloader_name self.observer_class = Observer self.event_handler = _CustomHandler() self.should_reload = False def trigger_reload(self, filename): # This is called inside an 
event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def run(self):
        watches = {}
        observer = self.observer_class()
        observer.start()

        try:
            while not self.should_reload:
                to_delete = set(watches)
                paths = _find_observable_paths(self.extra_files)
                for path in paths:
                    if path not in watches:
                        try:
                            watches[path] = observer.schedule(
                                self.event_handler, path, recursive=True
                            )
                        except OSError:
                            # Clear this path from the list of watches. We
                            # don't want the same error message showing again
                            # in the next iteration.
                            watches[path] = None
                    to_delete.discard(path)
                for path in to_delete:
                    watch = watches.pop(path, None)
                    if watch is not None:
                        observer.unschedule(watch)
                self.observable_paths = paths
                self._sleep(self.interval)
        finally:
            observer.stop()
            observer.join()

        sys.exit(3)


reloader_loops = {"stat": StatReloaderLoop, "watchdog": WatchdogReloaderLoop}

try:
    __import__("watchdog.observers")
except ImportError:
    reloader_loops["auto"] = reloader_loops["stat"]
else:
    reloader_loops["auto"] = reloader_loops["watchdog"]


def ensure_echo_on():
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload."""
    # tcgetattr will fail if stdin isn't a tty
    if not sys.stdin.isatty():
        return
    try:
        import termios
    except ImportError:
        return
    attributes = termios.tcgetattr(sys.stdin)
    if not attributes[3] & termios.ECHO:
        attributes[3] |= termios.ECHO
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)


def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
    """Run the given function in an independent python interpreter."""
    import signal

    reloader = reloader_loops[reloader_type](extra_files, interval)
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            ensure_echo_on()
            t = threading.Thread(target=main_func, args=())
            t.setDaemon(True)
            t.start()
            reloader.run()
        else:
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
0
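A hedged sketch of how the ``run_with_reloader`` entry point at the bottom of this module is driven; ``serve`` is a placeholder main loop (Werkzeug's ``serving.run_simple`` normally does this wiring for you), and ``config.ini`` is a hypothetical extra file to watch:

# Sketch: restart `serve` whenever a watched file changes.
import time
from werkzeug._reloader import run_with_reloader

def serve():
    while True:
        time.sleep(1)  # stand-in for a blocking server loop

# The parent process re-execs the interpreter whenever the child exits
# with code 3 (trigger_reload); the child runs `serve` in a daemon
# thread while the "stat" ReloaderLoop polls module mtimes.
run_with_reloader(serve, extra_files=["config.ini"], interval=1,
                  reloader_type="stat")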
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/_internal.py
# -*- coding: utf-8 -*- """ werkzeug._internal ~~~~~~~~~~~~~~~~~~ This module provides internally used helpers and constants. :copyright: 2007 Pallets :license: BSD-3-Clause """ import inspect import logging import re import string from datetime import date from datetime import datetime from itertools import chain from weakref import WeakKeyDictionary from ._compat import int_to_byte from ._compat import integer_types from ._compat import iter_bytes from ._compat import range_type from ._compat import text_type _logger = None _signature_cache = WeakKeyDictionary() _epoch_ord = date(1970, 1, 1).toordinal() _legal_cookie_chars = ( string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:" ).encode("ascii") _cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"} for _i in chain(range_type(32), range_type(127, 256)): _cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1") _octal_re = re.compile(br"\\[0-3][0-7][0-7]") _quote_re = re.compile(br"[\\].") _legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _cookie_re = re.compile( br""" (?P<key>[^=;]+) (?:\s*=\s* (?P<val> "(?:[^\\"]|\\.)*" | (?:.*?) ) )? \s*; """, flags=re.VERBOSE, ) class _Missing(object): def __repr__(self): return "no value" def __reduce__(self): return "_missing" _missing = _Missing() def _get_environ(obj): env = getattr(obj, "environ", obj) assert isinstance(env, dict), ( "%r is not a WSGI environment (has to be a dict)" % type(obj).__name__ ) return env def _has_level_handler(logger): """Check if there is a handler in the logging chain that will handle the given logger's effective level. """ level = logger.getEffectiveLevel() current = logger while current: if any(handler.level <= level for handler in current.handlers): return True if not current.propagate: break current = current.parent return False def _log(type, message, *args, **kwargs): """Log a message to the 'werkzeug' logger. The logger is created the first time it is needed. If there is no level set, it is set to :data:`logging.INFO`. If there is no handler for the logger's effective level, a :class:`logging.StreamHandler` is added. 
""" global _logger if _logger is None: _logger = logging.getLogger("werkzeug") if _logger.level == logging.NOTSET: _logger.setLevel(logging.INFO) if not _has_level_handler(_logger): _logger.addHandler(logging.StreamHandler()) getattr(_logger, type)(message.rstrip(), *args, **kwargs) def _parse_signature(func): """Return a signature object for the function.""" if hasattr(func, "im_func"): func = func.im_func # if we have a cached validator for this function, return it parse = _signature_cache.get(func) if parse is not None: return parse # inspect the function signature and collect all the information if hasattr(inspect, "getfullargspec"): tup = inspect.getfullargspec(func) else: tup = inspect.getargspec(func) positional, vararg_var, kwarg_var, defaults = tup[:4] defaults = defaults or () arg_count = len(positional) arguments = [] for idx, name in enumerate(positional): if isinstance(name, list): raise TypeError( "cannot parse functions that unpack tuples in the function signature" ) try: default = defaults[idx - arg_count] except IndexError: param = (name, False, None) else: param = (name, True, default) arguments.append(param) arguments = tuple(arguments) def parse(args, kwargs): new_args = [] missing = [] extra = {} # consume as many arguments as positional as possible for idx, (name, has_default, default) in enumerate(arguments): try: new_args.append(args[idx]) except IndexError: try: new_args.append(kwargs.pop(name)) except KeyError: if has_default: new_args.append(default) else: missing.append(name) else: if name in kwargs: extra[name] = kwargs.pop(name) # handle extra arguments extra_positional = args[arg_count:] if vararg_var is not None: new_args.extend(extra_positional) extra_positional = () if kwargs and kwarg_var is None: extra.update(kwargs) kwargs = {} return ( new_args, kwargs, missing, extra, extra_positional, arguments, vararg_var, kwarg_var, ) _signature_cache[func] = parse return parse def _date_to_unix(arg): """Converts a timetuple, integer or datetime object into the seconds from epoch in utc. 
""" if isinstance(arg, datetime): arg = arg.utctimetuple() elif isinstance(arg, integer_types + (float,)): return int(arg) year, month, day, hour, minute, second = arg[:6] days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 hours = days * 24 + hour minutes = hours * 60 + minute seconds = minutes * 60 + second return seconds class _DictAccessorProperty(object): """Baseclass for `environ_property` and `header_property`.""" read_only = False def __init__( self, name, default=None, load_func=None, dump_func=None, read_only=None, doc=None, ): self.name = name self.default = default self.load_func = load_func self.dump_func = dump_func if read_only is not None: self.read_only = read_only self.__doc__ = doc def __get__(self, obj, type=None): if obj is None: return self storage = self.lookup(obj) if self.name not in storage: return self.default rv = storage[self.name] if self.load_func is not None: try: rv = self.load_func(rv) except (ValueError, TypeError): rv = self.default return rv def __set__(self, obj, value): if self.read_only: raise AttributeError("read only property") if self.dump_func is not None: value = self.dump_func(value) self.lookup(obj)[self.name] = value def __delete__(self, obj): if self.read_only: raise AttributeError("read only property") self.lookup(obj).pop(self.name, None) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.name) def _cookie_quote(b): buf = bytearray() all_legal = True _lookup = _cookie_quoting_map.get _push = buf.extend for char in iter_bytes(b): if char not in _legal_cookie_chars: all_legal = False char = _lookup(char, char) _push(char) if all_legal: return bytes(buf) return bytes(b'"' + buf + b'"') def _cookie_unquote(b): if len(b) < 2: return b if b[:1] != b'"' or b[-1:] != b'"': return b b = b[1:-1] i = 0 n = len(b) rv = bytearray() _push = rv.extend while 0 <= i < n: o_match = _octal_re.search(b, i) q_match = _quote_re.search(b, i) if not o_match and not q_match: rv.extend(b[i:]) break j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): _push(b[i:k]) _push(b[k + 1 : k + 2]) i = k + 2 else: _push(b[i:j]) rv.append(int(b[j + 1 : j + 4], 8)) i = j + 4 return bytes(rv) def _cookie_parse_impl(b): """Lowlevel cookie parsing facility that operates on bytes.""" i = 0 n = len(b) while i < n: match = _cookie_re.search(b + b";", i) if not match: break key = match.group("key").strip() value = match.group("val") or b"" i = match.end(0) yield _cookie_unquote(key), _cookie_unquote(value) def _encode_idna(domain): # If we're given bytes, make sure they fit into ASCII if not isinstance(domain, text_type): domain.decode("ascii") return domain # Otherwise check if it's already ascii, then return try: return domain.encode("ascii") except UnicodeError: pass # Otherwise encode each part separately parts = domain.split(".") for idx, part in enumerate(parts): parts[idx] = part.encode("idna") return b".".join(parts) def _decode_idna(domain): # If the input is a string try to encode it to ascii to # do the idna decoding. if that fails because of an # unicode error, then we already have a decoded idna domain if isinstance(domain, text_type): try: domain = domain.encode("ascii") except UnicodeError: return domain # Decode each part separately. If a part fails, try to # decode it with ascii and silently ignore errors. 
This makes # most sense because the idna codec does not have error handling parts = domain.split(b".") for idx, part in enumerate(parts): try: parts[idx] = part.decode("idna") except UnicodeError: parts[idx] = part.decode("ascii", "ignore") return ".".join(parts) def _make_cookie_domain(domain): if domain is None: return None domain = _encode_idna(domain) if b":" in domain: domain = domain.split(b":", 1)[0] if b"." in domain: return domain raise ValueError( "Setting 'domain' for a cookie on a server running locally (ex: " "localhost) is not supported by complying browsers. You should " "have something like: '127.0.0.1 localhost dev.localhost' on " "your hosts file and then point your server to run on " "'dev.localhost' and also set 'domain' for 'dev.localhost'" ) def _easteregg(app=None): """Like the name says. But who knows how it works?""" def bzzzzzzz(gyver): import base64 import zlib return zlib.decompress(base64.b64decode(gyver)).decode("ascii") gyver = u"\n".join( [ x + (77 - len(x)) * u" " for x in bzzzzzzz( b""" eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m 9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz 4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5 jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317 8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE 1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG 8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8 MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4 GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/ nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p 7f2zLkGNv8b191cD/3vs9Q833z8t""" ).splitlines() ] ) def easteregged(environ, start_response): def injecting_start_response(status, headers, exc_info=None): headers.append(("X-Powered-By", "Werkzeug")) return 
start_response(status, headers, exc_info) if app is not None and environ.get("QUERY_STRING") != "macgybarchakku": return app(environ, injecting_start_response) injecting_start_response("200 OK", [("Content-Type", "text/html")]) return [ ( u""" <!DOCTYPE html> <html> <head> <title>About Werkzeug</title> <style type="text/css"> body { font: 15px Georgia, serif; text-align: center; } a { color: #333; text-decoration: none; } h1 { font-size: 30px; margin: 20px 0 10px 0; } p { margin: 0 0 30px 0; } pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; } </style> </head> <body> <h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1> <p>the Swiss Army knife of Python web development.</p> <pre>%s\n\n\n</pre> </body> </html>""" % gyver ).encode("latin1") ] return easteregged
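
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of werkzeug itself). A minimal
# round-trip through the private cookie-quoting helpers defined above.
# These are underscore-prefixed internals, so this is for understanding
# only; application code should go through werkzeug.http.dump_cookie and
# parse_cookie instead. The sample value is arbitrary.
if __name__ == "__main__":
    from werkzeug._internal import _cookie_quote, _cookie_unquote

    raw = b'a value; with "special" chars'
    quoted = _cookie_quote(raw)  # illegal bytes escaped, wrapped in quotes
    assert quoted.startswith(b'"') and quoted.endswith(b'"')
    assert _cookie_unquote(quoted) == raw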
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/serving.py
# -*- coding: utf-8 -*- """ werkzeug.serving ~~~~~~~~~~~~~~~~ There are many ways to serve a WSGI application. While you're developing it you usually don't want a full blown webserver like Apache but a simple standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in the standard library. If you're using older versions of Python you can download the package from the cheeseshop. However there are some caveats. Sourcecode won't reload itself when changed and each time you kill the server using ``^C`` you get an `KeyboardInterrupt` error. While the latter is easy to solve the first one can be a pain in the ass in some situations. The easiest way is creating a small ``start-myproject.py`` that runs the application:: #!/usr/bin/env python # -*- coding: utf-8 -*- from myproject import make_app from werkzeug.serving import run_simple app = make_app(...) run_simple('localhost', 8080, app, use_reloader=True) You can also pass it a `extra_files` keyword argument with a list of additional files (like configuration files) you want to observe. For bigger applications you should consider using `click` (http://click.pocoo.org) instead of a simple start file. :copyright: 2007 Pallets :license: BSD-3-Clause """ import io import os import signal import socket import sys from datetime import datetime as dt from datetime import timedelta from ._compat import PY2 from ._compat import reraise from ._compat import WIN from ._compat import wsgi_encoding_dance from ._internal import _log from .exceptions import InternalServerError from .urls import uri_to_iri from .urls import url_parse from .urls import url_unquote try: import socketserver from http.server import BaseHTTPRequestHandler from http.server import HTTPServer except ImportError: import SocketServer as socketserver from BaseHTTPServer import HTTPServer from BaseHTTPServer import BaseHTTPRequestHandler try: import ssl except ImportError: class _SslDummy(object): def __getattr__(self, name): raise RuntimeError("SSL support unavailable") ssl = _SslDummy() try: import click except ImportError: click = None ThreadingMixIn = socketserver.ThreadingMixIn can_fork = hasattr(os, "fork") if can_fork: ForkingMixIn = socketserver.ForkingMixIn else: class ForkingMixIn(object): pass try: af_unix = socket.AF_UNIX except AttributeError: af_unix = None LISTEN_QUEUE = 128 can_open_by_fd = not WIN and hasattr(socket, "fromfd") # On Python 3, ConnectionError represents the same errnos as # socket.error from Python 2, while socket.error is an alias for the # more generic OSError. if PY2: _ConnectionError = socket.error else: _ConnectionError = ConnectionError class DechunkedInput(io.RawIOBase): """An input stream that handles Transfer-Encoding 'chunked'""" def __init__(self, rfile): self._rfile = rfile self._done = False self._len = 0 def readable(self): return True def read_chunk_len(self): try: line = self._rfile.readline().decode("latin1") _len = int(line.strip(), 16) except ValueError: raise IOError("Invalid chunk header") if _len < 0: raise IOError("Negative chunk length not allowed") return _len def readinto(self, buf): read = 0 while not self._done and read < len(buf): if self._len == 0: # This is the first chunk or we fully consumed the previous # one. Read the next length of the next chunk self._len = self.read_chunk_len() if self._len == 0: # Found the final chunk of size 0. 
The stream is now exhausted, # but there is still a final newline that should be consumed self._done = True if self._len > 0: # There is data (left) in this chunk, so append it to the # buffer. If this operation fully consumes the chunk, this will # reset self._len to 0. n = min(len(buf), self._len) buf[read : read + n] = self._rfile.read(n) self._len -= n read += n if self._len == 0: # Skip the terminating newline of a chunk that has been fully # consumed. This also applies to the 0-sized final chunk terminator = self._rfile.readline() if terminator not in (b"\n", b"\r\n", b"\r"): raise IOError("Missing chunk terminating newline") return read class WSGIRequestHandler(BaseHTTPRequestHandler, object): """A request handler that implements WSGI dispatching.""" @property def server_version(self): from . import __version__ return "Werkzeug/" + __version__ def make_environ(self): request_url = url_parse(self.path) def shutdown_server(): self.server.shutdown_signal = True url_scheme = "http" if self.server.ssl_context is None else "https" if not self.client_address: self.client_address = "<local>" if isinstance(self.client_address, str): self.client_address = (self.client_address, 0) else: pass # If there was no scheme but the path started with two slashes, # the first segment may have been incorrectly parsed as the # netloc, prepend it to the path again. if not request_url.scheme and request_url.netloc: path_info = "/%s%s" % (request_url.netloc, request_url.path) else: path_info = request_url.path path_info = url_unquote(path_info) environ = { "wsgi.version": (1, 0), "wsgi.url_scheme": url_scheme, "wsgi.input": self.rfile, "wsgi.errors": sys.stderr, "wsgi.multithread": self.server.multithread, "wsgi.multiprocess": self.server.multiprocess, "wsgi.run_once": False, "werkzeug.server.shutdown": shutdown_server, "SERVER_SOFTWARE": self.server_version, "REQUEST_METHOD": self.command, "SCRIPT_NAME": "", "PATH_INFO": wsgi_encoding_dance(path_info), "QUERY_STRING": wsgi_encoding_dance(request_url.query), # Non-standard, added by mod_wsgi, uWSGI "REQUEST_URI": wsgi_encoding_dance(self.path), # Non-standard, added by gunicorn "RAW_URI": wsgi_encoding_dance(self.path), "REMOTE_ADDR": self.address_string(), "REMOTE_PORT": self.port_integer(), "SERVER_NAME": self.server.server_address[0], "SERVER_PORT": str(self.server.server_address[1]), "SERVER_PROTOCOL": self.request_version, } for key, value in self.get_header_items(): key = key.upper().replace("-", "_") value = value.replace("\r\n", "") if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"): key = "HTTP_" + key if key in environ: value = "{},{}".format(environ[key], value) environ[key] = value if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked": environ["wsgi.input_terminated"] = True environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"]) # Per RFC 2616, if the URL is absolute, use that as the host. # We're using "has a scheme" to indicate an absolute URL. if request_url.scheme and request_url.netloc: environ["HTTP_HOST"] = request_url.netloc try: # binary_form=False gives nicer information, but wouldn't be compatible with # what Nginx or Apache could return. peer_cert = self.connection.getpeercert(binary_form=True) if peer_cert is not None: # Nginx and Apache use PEM format. environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(peer_cert) except ValueError: # SSL handshake hasn't finished. 
self.server.log("error", "Cannot fetch SSL peer certificate info") except AttributeError: # Not using TLS, the socket will not have getpeercert(). pass return environ def run_wsgi(self): if self.headers.get("Expect", "").lower().strip() == "100-continue": self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n") self.environ = environ = self.make_environ() headers_set = [] headers_sent = [] def write(data): assert headers_set, "write() before start_response" if not headers_sent: status, response_headers = headers_sent[:] = headers_set try: code, msg = status.split(None, 1) except ValueError: code, msg = status, "" code = int(code) self.send_response(code, msg) header_keys = set() for key, value in response_headers: self.send_header(key, value) key = key.lower() header_keys.add(key) if not ( "content-length" in header_keys or environ["REQUEST_METHOD"] == "HEAD" or code < 200 or code in (204, 304) ): self.close_connection = True self.send_header("Connection", "close") if "server" not in header_keys: self.send_header("Server", self.version_string()) if "date" not in header_keys: self.send_header("Date", self.date_time_string()) self.end_headers() assert isinstance(data, bytes), "applications must write bytes" if data: # Only write data if there is any to avoid Python 3.5 SSL bug self.wfile.write(data) self.wfile.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: reraise(*exc_info) finally: exc_info = None elif headers_set: raise AssertionError("Headers already set") headers_set[:] = [status, response_headers] return write def execute(app): application_iter = app(environ, start_response) try: for data in application_iter: write(data) if not headers_sent: write(b"") finally: if hasattr(application_iter, "close"): application_iter.close() try: execute(self.server.app) except (_ConnectionError, socket.timeout) as e: self.connection_dropped(e, environ) except Exception: if self.server.passthrough_errors: raise from .debug.tbtools import get_current_traceback traceback = get_current_traceback(ignore_system_exceptions=True) try: # if we haven't yet sent the headers but they are set # we roll back to be able to set them again. if not headers_sent: del headers_set[:] execute(InternalServerError()) except Exception: pass self.server.log("error", "Error on request:\n%s", traceback.plaintext) def handle(self): """Handles a request ignoring dropped connections.""" try: BaseHTTPRequestHandler.handle(self) except (_ConnectionError, socket.timeout) as e: self.connection_dropped(e) except Exception as e: if self.server.ssl_context is None or not is_ssl_error(e): raise if self.server.shutdown_signal: self.initiate_shutdown() def initiate_shutdown(self): """A horrible, horrible way to kill the server for Python 2.6 and later. It's the best we can do. """ # Windows does not provide SIGKILL, go with SIGTERM then. sig = getattr(signal, "SIGKILL", signal.SIGTERM) # reloader active if is_running_from_reloader(): os.kill(os.getpid(), sig) # python 2.7 self.server._BaseServer__shutdown_request = True # python 2.6 self.server._BaseServer__serving = False def connection_dropped(self, error, environ=None): """Called if the connection was closed by the client. By default nothing happens. 
""" def handle_one_request(self): """Handle a single HTTP request.""" self.raw_requestline = self.rfile.readline() if not self.raw_requestline: self.close_connection = 1 elif self.parse_request(): return self.run_wsgi() def send_response(self, code, message=None): """Send the response header and log the response code.""" self.log_request(code) if message is None: message = code in self.responses and self.responses[code][0] or "" if self.request_version != "HTTP/0.9": hdr = "%s %d %s\r\n" % (self.protocol_version, code, message) self.wfile.write(hdr.encode("ascii")) def version_string(self): return BaseHTTPRequestHandler.version_string(self).strip() def address_string(self): if getattr(self, "environ", None): return self.environ["REMOTE_ADDR"] elif not self.client_address: return "<local>" elif isinstance(self.client_address, str): return self.client_address else: return self.client_address[0] def port_integer(self): return self.client_address[1] def log_request(self, code="-", size="-"): try: path = uri_to_iri(self.path) msg = "%s %s %s" % (self.command, path, self.request_version) except AttributeError: # path isn't set if the requestline was bad msg = self.requestline code = str(code) if click: color = click.style if code[0] == "1": # 1xx - Informational msg = color(msg, bold=True) elif code[0] == "2": # 2xx - Success msg = color(msg, fg="white") elif code == "304": # 304 - Resource Not Modified msg = color(msg, fg="cyan") elif code[0] == "3": # 3xx - Redirection msg = color(msg, fg="green") elif code == "404": # 404 - Resource Not Found msg = color(msg, fg="yellow") elif code[0] == "4": # 4xx - Client Error msg = color(msg, fg="red", bold=True) else: # 5xx, or any other response msg = color(msg, fg="magenta", bold=True) self.log("info", '"%s" %s %s', msg, code, size) def log_error(self, *args): self.log("error", *args) def log_message(self, format, *args): self.log("info", format, *args) def log(self, type, message, *args): _log( type, "%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), message % args), ) def get_header_items(self): """ Get an iterable list of key/value pairs representing headers. This function provides Python 2/3 compatibility as related to the parsing of request headers. Python 2.7 is not compliant with RFC 3875 Section 4.1.18 which requires multiple values for headers to be provided or RFC 2616 which allows for folding of multi-line headers. This function will return a matching list regardless of Python version. It can be removed once Python 2.7 support is dropped. :return: List of tuples containing header hey/value pairs """ if PY2: # For Python 2, process the headers manually according to # W3C RFC 2616 Section 4.2. items = [] for header in self.headers.headers: # Remove "\r\n" from the header and split on ":" to get # the field name and value. try: key, value = header[0:-2].split(":", 1) except ValueError: # If header could not be slit with : but starts with white # space and it follows an existing header, it's a folded # header. if header[0] in ("\t", " ") and items: # Pop off the last header key, value = items.pop() # Append the current header to the value of the last # header which will be placed back on the end of the # list value = value + header # Otherwise it's just a bad header and should error else: # Re-raise the value error raise # Add the key and the value once stripped of leading # white space. The specification allows for stripping # trailing white space but the Python 3 code does not # strip trailing white space. 
Therefore, trailing space # will be left as is to match the Python 3 behavior. items.append((key, value.lstrip())) else: items = self.headers.items() return items #: backwards compatible name if someone is subclassing it BaseRequestHandler = WSGIRequestHandler def generate_adhoc_ssl_pair(cn=None): try: from cryptography import x509 from cryptography.x509.oid import NameOID from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import rsa except ImportError: raise TypeError("Using ad-hoc certificates requires the cryptography library.") pkey = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) # pretty damn sure that this is not actually accepted by anyone if cn is None: cn = u"*" subject = x509.Name( [ x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Dummy Certificate"), x509.NameAttribute(NameOID.COMMON_NAME, cn), ] ) cert = ( x509.CertificateBuilder() .subject_name(subject) .issuer_name(subject) .public_key(pkey.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(dt.utcnow()) .not_valid_after(dt.utcnow() + timedelta(days=365)) .add_extension(x509.ExtendedKeyUsage([x509.OID_SERVER_AUTH]), critical=False) .add_extension( x509.SubjectAlternativeName([x509.DNSName(u"*")]), critical=False ) .sign(pkey, hashes.SHA256(), default_backend()) ) return cert, pkey def make_ssl_devcert(base_path, host=None, cn=None): """Creates an SSL key for development. This should be used instead of the ``'adhoc'`` key which generates a new cert on each server start. It accepts a path for where it should store the key and cert and either a host or CN. If a host is given it will use the CN ``*.host/CN=host``. For more information see :func:`run_simple`. .. versionadded:: 0.9 :param base_path: the path to the certificate and key. The extension ``.crt`` is added for the certificate, ``.key`` is added for the key. :param host: the name of the host. This can be used as an alternative for the `cn`. :param cn: the `CN` to use. """ if host is not None: cn = u"*.%s/CN=%s" % (host, host) cert, pkey = generate_adhoc_ssl_pair(cn=cn) from cryptography.hazmat.primitives import serialization cert_file = base_path + ".crt" pkey_file = base_path + ".key" with open(cert_file, "wb") as f: f.write(cert.public_bytes(serialization.Encoding.PEM)) with open(pkey_file, "wb") as f: f.write( pkey.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) ) return cert_file, pkey_file def generate_adhoc_ssl_context(): """Generates an adhoc SSL context for the development server.""" import tempfile import atexit cert, pkey = generate_adhoc_ssl_pair() from cryptography.hazmat.primitives import serialization cert_handle, cert_file = tempfile.mkstemp() pkey_handle, pkey_file = tempfile.mkstemp() atexit.register(os.remove, pkey_file) atexit.register(os.remove, cert_file) os.write(cert_handle, cert.public_bytes(serialization.Encoding.PEM)) os.write( pkey_handle, pkey.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ), ) os.close(cert_handle) os.close(pkey_handle) ctx = load_ssl_context(cert_file, pkey_file) return ctx def load_ssl_context(cert_file, pkey_file=None, protocol=None): """Loads SSL context from cert/private key files and optional protocol. 
Many parameters are directly taken from the API of :py:class:`ssl.SSLContext`. :param cert_file: Path of the certificate to use. :param pkey_file: Path of the private key to use. If not given, the key will be obtained from the certificate file. :param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl`` module. Defaults to ``PROTOCOL_SSLv23``. """ if protocol is None: try: protocol = ssl.PROTOCOL_TLS_SERVER except AttributeError: # Python <= 3.5 compat protocol = ssl.PROTOCOL_SSLv23 ctx = _SSLContext(protocol) ctx.load_cert_chain(cert_file, pkey_file) return ctx class _SSLContext(object): """A dummy class with a small subset of Python3's ``ssl.SSLContext``, only intended to be used with and by Werkzeug.""" def __init__(self, protocol): self._protocol = protocol self._certfile = None self._keyfile = None self._password = None def load_cert_chain(self, certfile, keyfile=None, password=None): self._certfile = certfile self._keyfile = keyfile or certfile self._password = password def wrap_socket(self, sock, **kwargs): return ssl.wrap_socket( sock, keyfile=self._keyfile, certfile=self._certfile, ssl_version=self._protocol, **kwargs ) def is_ssl_error(error=None): """Checks if the given error (or the current one) is an SSL error.""" if error is None: error = sys.exc_info()[1] return isinstance(error, ssl.SSLError) def select_address_family(host, port): """Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on the host and port.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. # try: # info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, # socket.SOCK_STREAM, 0, # socket.AI_PASSIVE) # if info: # return info[0][0] # except socket.gaierror: # pass if host.startswith("unix://"): return socket.AF_UNIX elif ":" in host and hasattr(socket, "AF_INET6"): return socket.AF_INET6 return socket.AF_INET def get_sockaddr(host, port, family): """Return a fully qualified socket address that can be passed to :func:`socket.bind`.""" if family == af_unix: return host.split("://", 1)[1] try: res = socket.getaddrinfo( host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP ) except socket.gaierror: return host, port return res[0][4] class BaseWSGIServer(HTTPServer, object): """Simple single-threaded, single-process WSGI server.""" multithread = False multiprocess = False request_queue_size = LISTEN_QUEUE def __init__( self, host, port, app, handler=None, passthrough_errors=False, ssl_context=None, fd=None, ): if handler is None: handler = WSGIRequestHandler self.address_family = select_address_family(host, port) if fd is not None: real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM) port = 0 server_address = get_sockaddr(host, int(port), self.address_family) # remove socket file if it already exists if self.address_family == af_unix and os.path.exists(server_address): os.unlink(server_address) HTTPServer.__init__(self, server_address, handler) self.app = app self.passthrough_errors = passthrough_errors self.shutdown_signal = False self.host = host self.port = self.socket.getsockname()[1] # Patch in the original socket. 
if fd is not None: self.socket.close() self.socket = real_sock self.server_address = self.socket.getsockname() if ssl_context is not None: if isinstance(ssl_context, tuple): ssl_context = load_ssl_context(*ssl_context) if ssl_context == "adhoc": ssl_context = generate_adhoc_ssl_context() # If we are on Python 2 the return value from socket.fromfd # is an internal socket object but what we need for ssl wrap # is the wrapper around it :( sock = self.socket if PY2 and not isinstance(sock, socket.socket): sock = socket.socket(sock.family, sock.type, sock.proto, sock) self.socket = ssl_context.wrap_socket(sock, server_side=True) self.ssl_context = ssl_context else: self.ssl_context = None def log(self, type, message, *args): _log(type, message, *args) def serve_forever(self): self.shutdown_signal = False try: HTTPServer.serve_forever(self) except KeyboardInterrupt: pass finally: self.server_close() def handle_error(self, request, client_address): if self.passthrough_errors: raise # Python 2 still causes a socket.error after the earlier # handling, so silence it here. if isinstance(sys.exc_info()[1], _ConnectionError): return return HTTPServer.handle_error(self, request, client_address) def get_request(self): con, info = self.socket.accept() return con, info class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer): """A WSGI server that does threading.""" multithread = True daemon_threads = True class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer): """A WSGI server that does forking.""" multiprocess = True def __init__( self, host, port, app, processes=40, handler=None, passthrough_errors=False, ssl_context=None, fd=None, ): if not can_fork: raise ValueError("Your platform does not support forking.") BaseWSGIServer.__init__( self, host, port, app, handler, passthrough_errors, ssl_context, fd ) self.max_children = processes def make_server( host=None, port=None, app=None, threaded=False, processes=1, request_handler=None, passthrough_errors=False, ssl_context=None, fd=None, ): """Create a new server instance that is either threaded, or forks or just processes one request after another. """ if threaded and processes > 1: raise ValueError("cannot have a multithreaded and multi process server.") elif threaded: return ThreadedWSGIServer( host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd ) elif processes > 1: return ForkingWSGIServer( host, port, app, processes, request_handler, passthrough_errors, ssl_context, fd=fd, ) else: return BaseWSGIServer( host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd ) def is_running_from_reloader(): """Checks if the application is running from within the Werkzeug reloader subprocess. .. versionadded:: 0.10 """ return os.environ.get("WERKZEUG_RUN_MAIN") == "true" def run_simple( hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, reloader_type="auto", threaded=False, processes=1, request_handler=None, static_files=None, passthrough_errors=False, ssl_context=None, ): """Start a WSGI application. Optional features include a reloader, multithreading and fork support. This function has a command-line interface too:: python -m werkzeug.serving --help .. versionadded:: 0.5 `static_files` was added to simplify serving of static files as well as `passthrough_errors`. .. versionadded:: 0.6 support for SSL was added. .. versionadded:: 0.8 Added support for automatically loading a SSL context from certificate file and private key. .. 
versionadded:: 0.9 Added command-line interface. .. versionadded:: 0.10 Improved the reloader and added support for changing the backend through the `reloader_type` parameter. See :ref:`reloader` for more information. .. versionchanged:: 0.15 Bind to a Unix socket by passing a path that starts with ``unix://`` as the ``hostname``. :param hostname: The host to bind to, for example ``'localhost'``. If the value is a path that starts with ``unix://`` it will bind to a Unix socket instead of a TCP socket.. :param port: The port for the server. eg: ``8080`` :param application: the WSGI application to execute :param use_reloader: should the server automatically restart the python process if modules were changed? :param use_debugger: should the werkzeug debugging system be used? :param use_evalex: should the exception evaluation feature be enabled? :param extra_files: a list of files the reloader should watch additionally to the modules. For example configuration files. :param reloader_interval: the interval for the reloader in seconds. :param reloader_type: the type of reloader to use. The default is auto detection. Valid values are ``'stat'`` and ``'watchdog'``. See :ref:`reloader` for more information. :param threaded: should the process handle each request in a separate thread? :param processes: if greater than 1 then handle each request in a new process up to this maximum number of concurrent processes. :param request_handler: optional parameter that can be used to replace the default one. You can use this to replace it with a different :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass. :param static_files: a list or dict of paths for static files. This works exactly like :class:`SharedDataMiddleware`, it's actually just wrapping the application in that middleware before serving. :param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an :class:`ssl.SSLContext`, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or ``None`` to disable SSL (which is the default). """ if not isinstance(port, int): raise TypeError("port must be an integer") if use_debugger: from .debug import DebuggedApplication application = DebuggedApplication(application, use_evalex) if static_files: from .middleware.shared_data import SharedDataMiddleware application = SharedDataMiddleware(application, static_files) def log_startup(sock): display_hostname = hostname if hostname not in ("", "*") else "localhost" quit_msg = "(Press CTRL+C to quit)" if sock.family == af_unix: _log("info", " * Running on %s %s", display_hostname, quit_msg) else: if ":" in display_hostname: display_hostname = "[%s]" % display_hostname port = sock.getsockname()[1] _log( "info", " * Running on %s://%s:%d/ %s", "http" if ssl_context is None else "https", display_hostname, port, quit_msg, ) def inner(): try: fd = int(os.environ["WERKZEUG_SERVER_FD"]) except (LookupError, ValueError): fd = None srv = make_server( hostname, port, application, threaded, processes, request_handler, passthrough_errors, ssl_context, fd=fd, ) if fd is None: log_startup(srv.socket) srv.serve_forever() if use_reloader: # If we're not running already in the subprocess that is the # reloader we want to open up a socket early to make sure the # port is actually available. 
if not is_running_from_reloader(): if port == 0 and not can_open_by_fd: raise ValueError( "Cannot bind to a random port with enabled " "reloader if the Python interpreter does " "not support socket opening by fd." ) # Create and destroy a socket so that any exceptions are # raised before we spawn a separate Python interpreter and # lose this ability. address_family = select_address_family(hostname, port) server_address = get_sockaddr(hostname, port, address_family) s = socket.socket(address_family, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(server_address) if hasattr(s, "set_inheritable"): s.set_inheritable(True) # If we can open the socket by file descriptor, then we can just # reuse this one and our socket will survive the restarts. if can_open_by_fd: os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno()) s.listen(LISTEN_QUEUE) log_startup(s) else: s.close() if address_family == af_unix: _log("info", "Unlinking %s" % server_address) os.unlink(server_address) # Do not use relative imports, otherwise "python -m werkzeug.serving" # breaks. from ._reloader import run_with_reloader run_with_reloader(inner, extra_files, reloader_interval, reloader_type) else: inner() def run_with_reloader(*args, **kwargs): # People keep using undocumented APIs. Do not use this function # please, we do not guarantee that it continues working. from ._reloader import run_with_reloader return run_with_reloader(*args, **kwargs) def main(): """A simple command-line interface for :py:func:`run_simple`.""" # in contrast to argparse, this works at least under Python < 2.7 import optparse from .utils import import_string parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object") parser.add_option( "-b", "--bind", dest="address", help="The hostname:port the app should listen on.", ) parser.add_option( "-d", "--debug", dest="use_debugger", action="store_true", default=False, help="Use Werkzeug's debugger.", ) parser.add_option( "-r", "--reload", dest="use_reloader", action="store_true", default=False, help="Reload Python process if modules change.", ) options, args = parser.parse_args() hostname, port = None, None if options.address: address = options.address.split(":") hostname = address[0] if len(address) > 1: port = address[1] if len(args) != 1: sys.stdout.write("No application supplied, or too much. See --help\n") sys.exit(1) app = import_string(args[0]) run_simple( hostname=(hostname or "127.0.0.1"), port=int(port or 5000), application=app, use_reloader=options.use_reloader, use_debugger=options.use_debugger, ) if __name__ == "__main__": main()
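
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of werkzeug itself). The smallest
# possible WSGI application served with run_simple, as described in the
# module docstring above. Host and port are arbitrary example values.
if __name__ == "__main__":
    from werkzeug.serving import run_simple

    def hello_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"Hello from run_simple\n"]

    # use_reloader restarts the server process when source files change.
    run_simple("localhost", 5000, hello_app, use_reloader=True)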
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/local.py
# -*- coding: utf-8 -*- """ werkzeug.local ~~~~~~~~~~~~~~ This module implements context-local objects. :copyright: 2007 Pallets :license: BSD-3-Clause """ import copy from functools import update_wrapper from ._compat import implements_bool from ._compat import PY2 from .wsgi import ClosingIterator # since each thread has its own greenlet we can just use those as identifiers # for the context. If greenlets are not available we fall back to the # current thread ident depending on where it is. try: from greenlet import getcurrent as get_ident except ImportError: try: from thread import get_ident except ImportError: from _thread import get_ident def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`LocalStack` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ("__storage__", "__ident_func__") def __init__(self): object.__setattr__(self, "__storage__", {}) object.__setattr__(self, "__ident_func__", get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return LocalProxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it returns a proxy that resolves to the topmost item on the stack. .. versionadded:: 0.6.1 """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() @property def __ident_func__(self): return self._local.__ident_func__ @__ident_func__.setter def __ident_func__(self, value): object.__setattr__(self._local, "__ident_func__", value) def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError("object unbound") return rv return LocalProxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, "stack", None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. 
""" stack = getattr(self._local, "stack", None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Every time the manager cleans up, it will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. .. versionchanged:: 0.6.1 Instead of a manager the :func:`release_local` function can be used as well. .. versionchanged:: 0.7 `ident_func` was added. """ def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, "__ident_func__", ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. .. versionchanged:: 0.7 You can pass a different ident function to the local manager that will then be propagated to all the locals passed to the constructor. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def make_middleware(self, app): """Wrap a WSGI application so that cleaning up happens after request end. """ def application(environ, start_response): return ClosingIterator(app(environ, start_response), self.cleanup) return application def middleware(self, func): """Like `make_middleware` but for decorating functions. Example usage:: @manager.middleware def application(environ, start_response): ... The difference to `make_middleware` is that the function passed will have all the arguments copied from the inner application (name, docstring, module). """ return update_wrapper(self.make_middleware(func), func) def __repr__(self): return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals)) @implements_bool class LocalProxy(object): """Acts as a proxy for a werkzeug local. Forwards all operations to a proxied object. The only operations not supported for forwarding are right handed operands and any kind of assignment. Example usage:: from werkzeug.local import Local l = Local() # these are proxies request = l('request') user = l('user') from werkzeug.local import LocalStack _response_local = LocalStack() # this is a proxy response = _response_local() Whenever something is bound to l.user / l.request the proxy objects will forward all operations. If no object is bound a :exc:`RuntimeError` will be raised. To create proxies to :class:`Local` or :class:`LocalStack` objects, call the object as shown above. 
If you want to have a proxy to an object looked up by a function, you can (as of Werkzeug 0.6.1) pass a function to the :class:`LocalProxy` constructor:: session = LocalProxy(lambda: get_current_request().session) .. versionchanged:: 0.6.1 The class can be instantiated with a callable as well now. """ __slots__ = ("__local", "__dict__", "__name__", "__wrapped__") def __init__(self, local, name=None): object.__setattr__(self, "_LocalProxy__local", local) object.__setattr__(self, "__name__", name) if callable(local) and not hasattr(local, "__release_local__"): # "local" is a callable that is not an instance of Local or # LocalManager: mark it as a wrapped function. object.__setattr__(self, "__wrapped__", local) def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. """ if not hasattr(self.__local, "__release_local__"): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError("no object bound to %s" % self.__name__) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError("__dict__") def __repr__(self): try: obj = self._get_current_object() except RuntimeError: return "<%s unbound>" % self.__class__.__name__ return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: return False def __unicode__(self): try: return unicode(self._get_current_object()) # noqa except RuntimeError: return repr(self) def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name): if name == "__members__": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] if PY2: __getslice__ = lambda x, i, j: x._get_current_object()[i:j] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) __str__ = lambda x: str(x._get_current_object()) __lt__ = lambda x, o: x._get_current_object() < o __le__ = lambda x, o: x._get_current_object() <= o __eq__ = lambda x, o: x._get_current_object() == o __ne__ = lambda x, o: x._get_current_object() != o __gt__ = lambda x, o: x._get_current_object() > o __ge__ = lambda x, o: x._get_current_object() >= o __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa __hash__ = lambda x: hash(x._get_current_object()) __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) __len__ = lambda x: len(x._get_current_object()) __getitem__ = lambda x, i: x._get_current_object()[i] __iter__ = lambda x: iter(x._get_current_object()) __contains__ = lambda x, i: i in x._get_current_object() __add__ = lambda x, o: x._get_current_object() + o __sub__ = lambda x, o: x._get_current_object() - o __mul__ = lambda x, o: x._get_current_object() * o __floordiv__ = lambda x, o: x._get_current_object() // o __mod__ = lambda x, o: x._get_current_object() % o __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) __pow__ = lambda x, o: x._get_current_object() ** o __lshift__ = lambda x, o: x._get_current_object() << o __rshift__ = lambda x, o: 
x._get_current_object() >> o __and__ = lambda x, o: x._get_current_object() & o __xor__ = lambda x, o: x._get_current_object() ^ o __or__ = lambda x, o: x._get_current_object() | o __div__ = lambda x, o: x._get_current_object().__div__(o) __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) __neg__ = lambda x: -(x._get_current_object()) __pos__ = lambda x: +(x._get_current_object()) __abs__ = lambda x: abs(x._get_current_object()) __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) __long__ = lambda x: long(x._get_current_object()) # noqa __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) __enter__ = lambda x: x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) __radd__ = lambda x, o: o + x._get_current_object() __rsub__ = lambda x, o: o - x._get_current_object() __rmul__ = lambda x, o: o * x._get_current_object() __rdiv__ = lambda x, o: o / x._get_current_object() if PY2: __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o) else: __rtruediv__ = __rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object() __rmod__ = lambda x, o: o % x._get_current_object() __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) __copy__ = lambda x: copy.copy(x._get_current_object()) __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
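
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of werkzeug itself). Demonstrates the
# three public pieces of this module: Local attribute storage, LocalStack
# push/pop semantics, and a LocalProxy built from a callable.
if __name__ == "__main__":
    from werkzeug.local import Local, LocalProxy, LocalStack, release_local

    loc = Local()
    loc.user = "alice"
    current_user = LocalProxy(lambda: loc.user)  # resolved on every access
    assert str(current_user) == "alice"

    stack = LocalStack()
    stack.push({"request": 1})
    assert stack.top == {"request": 1}
    assert stack.pop() == {"request": 1}
    assert stack.top is None  # empty stack yields None, not an error

    release_local(loc)  # drop everything stored for this context
    assert not hasattr(loc, "user")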
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/filesystem.py
# -*- coding: utf-8 -*-
"""
    werkzeug.filesystem
    ~~~~~~~~~~~~~~~~~~~

    Various utilities for the local filesystem.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import codecs
import sys
import warnings

# We do not trust traditional unixes.
has_likely_buggy_unicode_filesystem = (
    sys.platform.startswith("linux") or "bsd" in sys.platform
)


def _is_ascii_encoding(encoding):
    """Given an encoding this figures out if the encoding is actually
    ASCII (which is something we don't actually want in most cases).
    This is necessary because ASCII comes under many names such as
    ANSI_X3.4-1968.
    """
    if encoding is None:
        return False
    try:
        return codecs.lookup(encoding).name == "ascii"
    except LookupError:
        return False


class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
    """The warning used by Werkzeug to signal a broken filesystem.
    Will only be used once per runtime."""


_warned_about_filesystem_encoding = False


def get_filesystem_encoding():
    """Returns the filesystem encoding that should be used. Note that
    this is different from the Python understanding of the filesystem
    encoding, which might be deeply flawed. Do not use this value
    against Python's unicode APIs because it might be different. See
    :ref:`filesystem-encoding` for the exact behavior.

    The concept of a filesystem encoding in general is not something
    you should rely on. As such, if you ever need to use this function
    for anything but writing wrapper code, reconsider.
    """
    global _warned_about_filesystem_encoding
    rv = sys.getfilesystemencoding()
    if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                "Detected a misconfigured UNIX filesystem: Will use"
                " UTF-8 as filesystem encoding instead of {0!r}".format(rv),
                BrokenFilesystemWarning,
            )
            _warned_about_filesystem_encoding = True
        return "utf-8"
    return rv
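
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of werkzeug itself). On a correctly
# configured system this simply reports the interpreter's filesystem
# encoding; on a Unix system that reports a missing or ASCII encoding it
# falls back to "utf-8" and emits a one-time BrokenFilesystemWarning.
if __name__ == "__main__":
    from werkzeug.filesystem import get_filesystem_encoding

    print(get_filesystem_encoding())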
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/security.py
# -*- coding: utf-8 -*- """ werkzeug.security ~~~~~~~~~~~~~~~~~ Security related helpers such as secure password hashing tools. :copyright: 2007 Pallets :license: BSD-3-Clause """ import codecs import hashlib import hmac import os import posixpath from random import SystemRandom from struct import Struct from ._compat import izip from ._compat import PY2 from ._compat import range_type from ._compat import text_type from ._compat import to_bytes from ._compat import to_native SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" DEFAULT_PBKDF2_ITERATIONS = 150000 _pack_int = Struct(">I").pack _builtin_safe_str_cmp = getattr(hmac, "compare_digest", None) _sys_rng = SystemRandom() _os_alt_seps = list( sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/") ) def pbkdf2_hex( data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None ): """Like :func:`pbkdf2_bin`, but returns a hex-encoded string. .. versionadded:: 0.9 :param data: the data to derive. :param salt: the salt for the derivation. :param iterations: the number of iterations. :param keylen: the length of the resulting key. If not provided, the digest size will be used. :param hashfunc: the hash function to use. This can either be the string name of a known hash function, or a function from the hashlib module. Defaults to sha256. """ rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc) return to_native(codecs.encode(rv, "hex_codec")) def pbkdf2_bin( data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None ): """Returns a binary digest for the PBKDF2 hash algorithm of `data` with the given `salt`. It iterates `iterations` times and produces a key of `keylen` bytes. By default, SHA-256 is used as hash function; a different hashlib `hashfunc` can be provided. .. versionadded:: 0.9 :param data: the data to derive. :param salt: the salt for the derivation. :param iterations: the number of iterations. :param keylen: the length of the resulting key. If not provided the digest size will be used. :param hashfunc: the hash function to use. This can either be the string name of a known hash function or a function from the hashlib module. Defaults to sha256. """ if not hashfunc: hashfunc = "sha256" data = to_bytes(data) salt = to_bytes(salt) if callable(hashfunc): _test_hash = hashfunc() hash_name = getattr(_test_hash, "name", None) else: hash_name = hashfunc return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen) def safe_str_cmp(a, b): """This function compares strings in somewhat constant time. This requires that the length of at least one string is known in advance. Returns `True` if the two strings are equal, or `False` if they are not. .. versionadded:: 0.7 """ if isinstance(a, text_type): a = a.encode("utf-8") if isinstance(b, text_type): b = b.encode("utf-8") if _builtin_safe_str_cmp is not None: return _builtin_safe_str_cmp(a, b) if len(a) != len(b): return False rv = 0 if PY2: for x, y in izip(a, b): rv |= ord(x) ^ ord(y) else: for x, y in izip(a, b): rv |= x ^ y return rv == 0 def gen_salt(length): """Generate a random string of SALT_CHARS with specified ``length``.""" if length <= 0: raise ValueError("Salt length must be positive") return "".join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length)) def _hash_internal(method, salt, password): """Internal password hash helper. Supports plaintext without salt, unsalted and salted passwords. In case salted passwords are used hmac is used. 
""" if method == "plain": return password, method if isinstance(password, text_type): password = password.encode("utf-8") if method.startswith("pbkdf2:"): args = method[7:].split(":") if len(args) not in (1, 2): raise ValueError("Invalid number of arguments for PBKDF2") method = args.pop(0) iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS is_pbkdf2 = True actual_method = "pbkdf2:%s:%d" % (method, iterations) else: is_pbkdf2 = False actual_method = method if is_pbkdf2: if not salt: raise ValueError("Salt is required for PBKDF2") rv = pbkdf2_hex(password, salt, iterations, hashfunc=method) elif salt: if isinstance(salt, text_type): salt = salt.encode("utf-8") mac = _create_mac(salt, password, method) rv = mac.hexdigest() else: rv = hashlib.new(method, password).hexdigest() return rv, actual_method def _create_mac(key, msg, method): if callable(method): return hmac.HMAC(key, msg, method) def hashfunc(d=b""): return hashlib.new(method, d) # Python 2.7 used ``hasattr(digestmod, '__call__')`` # to detect if hashfunc is callable hashfunc.__call__ = hashfunc return hmac.HMAC(key, msg, hashfunc) def generate_password_hash(password, method="pbkdf2:sha256", salt_length=8): """Hash a password with the given method and salt with a string of the given length. The format of the string returned includes the method that was used so that :func:`check_password_hash` can check the hash. The format for the hashed string looks like this:: method$salt$hash This method can **not** generate unsalted passwords but it is possible to set param method='plain' in order to enforce plaintext passwords. If a salt is used, hmac is used internally to salt the password. If PBKDF2 is wanted it can be enabled by setting the method to ``pbkdf2:method:iterations`` where iterations is optional:: pbkdf2:sha256:80000$salt$hash pbkdf2:sha256$salt$hash :param password: the password to hash. :param method: the hash method to use (one that hashlib supports). Can optionally be in the format ``pbkdf2:<method>[:iterations]`` to enable PBKDF2. :param salt_length: the length of the salt in letters. """ salt = gen_salt(salt_length) if method != "plain" else "" h, actual_method = _hash_internal(method, salt, password) return "%s$%s$%s" % (actual_method, salt, h) def check_password_hash(pwhash, password): """check a password against a given salted and hashed password value. In order to support unsalted legacy passwords this method supports plain text passwords, md5 and sha1 hashes (both salted and unsalted). Returns `True` if the password matched, `False` otherwise. :param pwhash: a hashed string like returned by :func:`generate_password_hash`. :param password: the plaintext password to compare against the hash. """ if pwhash.count("$") < 2: return False method, salt, hashval = pwhash.split("$", 2) return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval) def safe_join(directory, *pathnames): """Safely join zero or more untrusted path components to a base directory to avoid escaping the base directory. :param directory: The trusted base directory. :param pathnames: The untrusted path components relative to the base directory. :return: A safe path, otherwise ``None``. """ parts = [directory] for filename in pathnames: if filename != "": filename = posixpath.normpath(filename) if ( any(sep in filename for sep in _os_alt_seps) or os.path.isabs(filename) or filename == ".." or filename.startswith("../") ): return None parts.append(filename) return posixpath.join(*parts)
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/__init__.py
""" werkzeug ~~~~~~~~ Werkzeug is the Swiss Army knife of Python web development. It provides useful classes and functions for any WSGI application to make the life of a Python web developer much easier. All of the provided classes are independent from each other so you can mix it with any other library. :copyright: 2007 Pallets :license: BSD-3-Clause """ from .serving import run_simple from .test import Client from .wrappers import Request from .wrappers import Response __version__ = "1.0.1"
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/test.py
# -*- coding: utf-8 -*- """ werkzeug.test ~~~~~~~~~~~~~ This module implements a client to WSGI applications for testing. :copyright: 2007 Pallets :license: BSD-3-Clause """ import mimetypes import sys from io import BytesIO from itertools import chain from random import random from tempfile import TemporaryFile from time import time from ._compat import iteritems from ._compat import iterlists from ._compat import itervalues from ._compat import make_literal_wrapper from ._compat import reraise from ._compat import string_types from ._compat import text_type from ._compat import to_bytes from ._compat import wsgi_encoding_dance from ._internal import _get_environ from .datastructures import CallbackDict from .datastructures import CombinedMultiDict from .datastructures import EnvironHeaders from .datastructures import FileMultiDict from .datastructures import Headers from .datastructures import MultiDict from .http import dump_cookie from .http import dump_options_header from .http import parse_options_header from .urls import iri_to_uri from .urls import url_encode from .urls import url_fix from .urls import url_parse from .urls import url_unparse from .urls import url_unquote from .utils import get_content_type from .wrappers import BaseRequest from .wsgi import ClosingIterator from .wsgi import get_current_url try: from urllib.request import Request as U2Request except ImportError: from urllib2 import Request as U2Request try: from http.cookiejar import CookieJar except ImportError: from cookielib import CookieJar def stream_encode_multipart( values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset="utf-8" ): """Encode a dict of values (either strings or file descriptors or :class:`FileStorage` objects.) into a multipart encoded string stored in a file descriptor. 
""" if boundary is None: boundary = "---------------WerkzeugFormPart_%s%s" % (time(), random()) _closure = [BytesIO(), 0, False] if use_tempfile: def write_binary(string): stream, total_length, on_disk = _closure if on_disk: stream.write(string) else: length = len(string) if length + _closure[1] <= threshold: stream.write(string) else: new_stream = TemporaryFile("wb+") new_stream.write(stream.getvalue()) new_stream.write(string) _closure[0] = new_stream _closure[2] = True _closure[1] = total_length + length else: write_binary = _closure[0].write def write(string): write_binary(string.encode(charset)) if not isinstance(values, MultiDict): values = MultiDict(values) for key, values in iterlists(values): for value in values: write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key)) reader = getattr(value, "read", None) if reader is not None: filename = getattr(value, "filename", getattr(value, "name", None)) content_type = getattr(value, "content_type", None) if content_type is None: content_type = ( filename and mimetypes.guess_type(filename)[0] or "application/octet-stream" ) if filename is not None: write('; filename="%s"\r\n' % filename) else: write("\r\n") write("Content-Type: %s\r\n\r\n" % content_type) while 1: chunk = reader(16384) if not chunk: break write_binary(chunk) else: if not isinstance(value, string_types): value = str(value) value = to_bytes(value, charset) write("\r\n\r\n") write_binary(value) write("\r\n") write("--%s--\r\n" % boundary) length = int(_closure[0].tell()) _closure[0].seek(0) return _closure[0], length, boundary def encode_multipart(values, boundary=None, charset="utf-8"): """Like `stream_encode_multipart` but returns a tuple in the form (``boundary``, ``data``) where data is a bytestring. """ stream, length, boundary = stream_encode_multipart( values, use_tempfile=False, boundary=boundary, charset=charset ) return boundary, stream.read() class _TestCookieHeaders(object): """A headers adapter for cookielib """ def __init__(self, headers): self.headers = headers def getheaders(self, name): headers = [] name = name.lower() for k, v in self.headers: if k.lower() == name: headers.append(v) return headers def get_all(self, name, default=None): rv = [] for k, v in self.headers: if k.lower() == name.lower(): rv.append(v) return rv or default or [] class _TestCookieResponse(object): """Something that looks like a httplib.HTTPResponse, but is actually just an adapter for our test responses to make them available for cookielib. """ def __init__(self, headers): self.headers = _TestCookieHeaders(headers) def info(self): return self.headers class _TestCookieJar(CookieJar): """A cookielib.CookieJar modified to inject and read cookie headers from and to wsgi environments, and wsgi application responses. """ def inject_wsgi(self, environ): """Inject the cookies as client headers into the server's wsgi environment. """ cvals = ["%s=%s" % (c.name, c.value) for c in self] if cvals: environ["HTTP_COOKIE"] = "; ".join(cvals) else: environ.pop("HTTP_COOKIE", None) def extract_wsgi(self, environ, headers): """Extract the server's set-cookie headers as cookies into the cookie jar. """ self.extract_cookies( _TestCookieResponse(headers), U2Request(get_current_url(environ)) ) def _iter_data(data): """Iterates over a `dict` or :class:`MultiDict` yielding all keys and values. This is used to iterate over the data passed to the :class:`EnvironBuilder`. 
""" if isinstance(data, MultiDict): for key, values in iterlists(data): for value in values: yield key, value else: for key, values in iteritems(data): if isinstance(values, list): for value in values: yield key, value else: yield key, values class EnvironBuilder(object): """This class can be used to conveniently create a WSGI environment for testing purposes. It can be used to quickly create WSGI environments or request objects from arbitrary data. The signature of this class is also used in some other places as of Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`, :meth:`Client.open`). Because of this most of the functionality is available through the constructor alone. Files and regular form data can be manipulated independently of each other with the :attr:`form` and :attr:`files` attributes, but are passed with the same argument to the constructor: `data`. `data` can be any of these values: - a `str` or `bytes` object: The object is converted into an :attr:`input_stream`, the :attr:`content_length` is set and you have to provide a :attr:`content_type`. - a `dict` or :class:`MultiDict`: The keys have to be strings. The values have to be either any of the following objects, or a list of any of the following objects: - a :class:`file`-like object: These are converted into :class:`FileStorage` objects automatically. - a `tuple`: The :meth:`~FileMultiDict.add_file` method is called with the key and the unpacked `tuple` items as positional arguments. - a `str`: The string is set as form data for the associated key. - a file-like object: The object content is loaded in memory and then handled like a regular `str` or a `bytes`. :param path: the path of the request. In the WSGI environment this will end up as `PATH_INFO`. If the `query_string` is not defined and there is a question mark in the `path` everything after it is used as query string. :param base_url: the base URL is a URL that is used to extract the WSGI URL scheme, host (server name + server port) and the script root (`SCRIPT_NAME`). :param query_string: an optional string or dict with URL parameters. :param method: the HTTP method to use, defaults to `GET`. :param input_stream: an optional input stream. Do not specify this and `data`. As soon as an input stream is set you can't modify :attr:`args` and :attr:`files` unless you set the :attr:`input_stream` to `None` again. :param content_type: The content type for the request. As of 0.5 you don't have to provide this when specifying files and form data via `data`. :param content_length: The content length for the request. You don't have to specify this when providing data via `data`. :param errors_stream: an optional error stream that is used for `wsgi.errors`. Defaults to :data:`stderr`. :param multithread: controls `wsgi.multithread`. Defaults to `False`. :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`. :param run_once: controls `wsgi.run_once`. Defaults to `False`. :param headers: an optional list or :class:`Headers` object of headers. :param data: a string or dict of form data or a file-object. See explanation above. :param json: An object to be serialized and assigned to ``data``. Defaults the content type to ``"application/json"``. Serialized with the function assigned to :attr:`json_dumps`. :param environ_base: an optional dict of environment defaults. :param environ_overrides: an optional dict of environment overrides. :param charset: the charset used to encode unicode data. .. 
versionadded:: 0.15
       The ``json`` param and :meth:`json_dumps` method.

    .. versionadded:: 0.15
       The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
       the path before percent-decoding. This is not part of the WSGI
       PEP, but many WSGI servers include it.

    .. versionchanged:: 0.6
       ``path`` and ``base_url`` can now be unicode strings that are
       encoded with :func:`iri_to_uri`.
    """

    #: the server protocol to use.  defaults to HTTP/1.1
    server_protocol = "HTTP/1.1"

    #: the wsgi version to use.  defaults to (1, 0)
    wsgi_version = (1, 0)

    #: the default request class for :meth:`get_request`
    request_class = BaseRequest

    import json

    #: The serialization function used when ``json`` is passed.
    json_dumps = staticmethod(json.dumps)
    del json

    def __init__(
        self,
        path="/",
        base_url=None,
        query_string=None,
        method="GET",
        input_stream=None,
        content_type=None,
        content_length=None,
        errors_stream=None,
        multithread=False,
        multiprocess=False,
        run_once=False,
        headers=None,
        data=None,
        environ_base=None,
        environ_overrides=None,
        charset="utf-8",
        mimetype=None,
        json=None,
    ):
        path_s = make_literal_wrapper(path)
        if query_string is not None and path_s("?") in path:
            raise ValueError("Query string is defined in the path and as an argument")
        if query_string is None and path_s("?") in path:
            path, query_string = path.split(path_s("?"), 1)
        self.charset = charset
        self.path = iri_to_uri(path)
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        self.base_url = base_url
        if isinstance(query_string, (bytes, text_type)):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False

        if json is not None:
            if data is not None:
                raise TypeError("can't provide both json and data")

            data = self.json_dumps(json)

            if self.content_type is None:
                self.content_type = "application/json"

        if data:
            if input_stream is not None:
                raise TypeError("can't provide input stream and data")

            if hasattr(data, "read"):
                data = data.read()

            if isinstance(data, text_type):
                data = data.encode(self.charset)

            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)

                if self.content_length is None:
                    self.content_length = len(data)
            else:
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)

        if mimetype is not None:
            self.mimetype = mimetype

    @classmethod
    def from_environ(cls, environ, **kwargs):
        """Turn an environ dict back into a builder. Any extra kwargs
        override the args extracted from the environ.

        ..
versionadded:: 0.15 """ headers = Headers(EnvironHeaders(environ)) out = { "path": environ["PATH_INFO"], "base_url": cls._make_base_url( environ["wsgi.url_scheme"], headers.pop("Host"), environ["SCRIPT_NAME"] ), "query_string": environ["QUERY_STRING"], "method": environ["REQUEST_METHOD"], "input_stream": environ["wsgi.input"], "content_type": headers.pop("Content-Type", None), "content_length": headers.pop("Content-Length", None), "errors_stream": environ["wsgi.errors"], "multithread": environ["wsgi.multithread"], "multiprocess": environ["wsgi.multiprocess"], "run_once": environ["wsgi.run_once"], "headers": headers, } out.update(kwargs) return cls(**out) def _add_file_from_data(self, key, value): """Called in the EnvironBuilder to add files from the data dict.""" if isinstance(value, tuple): self.files.add_file(key, *value) else: self.files.add_file(key, value) @staticmethod def _make_base_url(scheme, host, script_root): return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/" @property def base_url(self): """The base URL is used to extract the URL scheme, host name, port, and root path. """ return self._make_base_url(self.url_scheme, self.host, self.script_root) @base_url.setter def base_url(self, value): if value is None: scheme = "http" netloc = "localhost" script_root = "" else: scheme, netloc, script_root, qs, anchor = url_parse(value) if qs or anchor: raise ValueError("base url must not contain a query string or fragment") self.script_root = script_root.rstrip("/") self.host = netloc self.url_scheme = scheme @property def content_type(self): """The content type for the request. Reflected from and to the :attr:`headers`. Do not set if you set :attr:`files` or :attr:`form` for auto detection. """ ct = self.headers.get("Content-Type") if ct is None and not self._input_stream: if self._files: return "multipart/form-data" if self._form: return "application/x-www-form-urlencoded" return None return ct @content_type.setter def content_type(self, value): if value is None: self.headers.pop("Content-Type", None) else: self.headers["Content-Type"] = value @property def mimetype(self): """The mimetype (content type without charset etc.) .. versionadded:: 0.14 """ ct = self.content_type return ct.split(";")[0].strip() if ct else None @mimetype.setter def mimetype(self, value): self.content_type = get_content_type(value, self.charset) @property def mimetype_params(self): """ The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.14 """ def on_update(d): self.headers["Content-Type"] = dump_options_header(self.mimetype, d) d = parse_options_header(self.headers.get("content-type", ""))[1] return CallbackDict(d, on_update) @property def content_length(self): """The content length as integer. Reflected from and to the :attr:`headers`. Do not set if you set :attr:`files` or :attr:`form` for auto detection. """ return self.headers.get("Content-Length", type=int) @content_length.setter def content_length(self, value): if value is None: self.headers.pop("Content-Length", None) else: self.headers["Content-Length"] = str(value) def _get_form(self, name, storage): """Common behavior for getting the :attr:`form` and :attr:`files` properties. :param name: Name of the internal cached attribute. :param storage: Storage class used for the data. 
""" if self.input_stream is not None: raise AttributeError("an input stream is defined") rv = getattr(self, name) if rv is None: rv = storage() setattr(self, name, rv) return rv def _set_form(self, name, value): """Common behavior for setting the :attr:`form` and :attr:`files` properties. :param name: Name of the internal cached attribute. :param value: Value to assign to the attribute. """ self._input_stream = None setattr(self, name, value) @property def form(self): """A :class:`MultiDict` of form values.""" return self._get_form("_form", MultiDict) @form.setter def form(self, value): self._set_form("_form", value) @property def files(self): """A :class:`FileMultiDict` of uploaded files. Use :meth:`~FileMultiDict.add_file` to add new files. """ return self._get_form("_files", FileMultiDict) @files.setter def files(self, value): self._set_form("_files", value) @property def input_stream(self): """An optional input stream. If you set this it will clear :attr:`form` and :attr:`files`. """ return self._input_stream @input_stream.setter def input_stream(self, value): self._input_stream = value self._form = None self._files = None @property def query_string(self): """The query string. If you set this to a string :attr:`args` will no longer be available. """ if self._query_string is None: if self._args is not None: return url_encode(self._args, charset=self.charset) return "" return self._query_string @query_string.setter def query_string(self, value): self._query_string = value self._args = None @property def args(self): """The URL arguments as :class:`MultiDict`.""" if self._query_string is not None: raise AttributeError("a query string is defined") if self._args is None: self._args = MultiDict() return self._args @args.setter def args(self, value): self._query_string = None self._args = value @property def server_name(self): """The server name (read-only, use :attr:`host` to set)""" return self.host.split(":", 1)[0] @property def server_port(self): """The server port as integer (read-only, use :attr:`host` to set)""" pieces = self.host.split(":", 1) if len(pieces) == 2 and pieces[1].isdigit(): return int(pieces[1]) if self.url_scheme == "https": return 443 return 80 def __del__(self): try: self.close() except Exception: pass def close(self): """Closes all files. If you put real :class:`file` objects into the :attr:`files` dict you can call this method to automatically close them all in one go. """ if self.closed: return try: files = itervalues(self.files) except AttributeError: files = () for f in files: try: f.close() except Exception: pass self.closed = True def get_environ(self): """Return the built environ. .. versionchanged:: 0.15 The content type and length headers are set based on input stream detection. Previously this only set the WSGI keys. 
""" input_stream = self.input_stream content_length = self.content_length mimetype = self.mimetype content_type = self.content_type if input_stream is not None: start_pos = input_stream.tell() input_stream.seek(0, 2) end_pos = input_stream.tell() input_stream.seek(start_pos) content_length = end_pos - start_pos elif mimetype == "multipart/form-data": values = CombinedMultiDict([self.form, self.files]) input_stream, content_length, boundary = stream_encode_multipart( values, charset=self.charset ) content_type = mimetype + '; boundary="%s"' % boundary elif mimetype == "application/x-www-form-urlencoded": # XXX: py2v3 review values = url_encode(self.form, charset=self.charset) values = values.encode("ascii") content_length = len(values) input_stream = BytesIO(values) else: input_stream = BytesIO() result = {} if self.environ_base: result.update(self.environ_base) def _path_encode(x): return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset) qs = wsgi_encoding_dance(self.query_string) result.update( { "REQUEST_METHOD": self.method, "SCRIPT_NAME": _path_encode(self.script_root), "PATH_INFO": _path_encode(self.path), "QUERY_STRING": qs, # Non-standard, added by mod_wsgi, uWSGI "REQUEST_URI": wsgi_encoding_dance(self.path), # Non-standard, added by gunicorn "RAW_URI": wsgi_encoding_dance(self.path), "SERVER_NAME": self.server_name, "SERVER_PORT": str(self.server_port), "HTTP_HOST": self.host, "SERVER_PROTOCOL": self.server_protocol, "wsgi.version": self.wsgi_version, "wsgi.url_scheme": self.url_scheme, "wsgi.input": input_stream, "wsgi.errors": self.errors_stream, "wsgi.multithread": self.multithread, "wsgi.multiprocess": self.multiprocess, "wsgi.run_once": self.run_once, } ) headers = self.headers.copy() if content_type is not None: result["CONTENT_TYPE"] = content_type headers.set("Content-Type", content_type) if content_length is not None: result["CONTENT_LENGTH"] = str(content_length) headers.set("Content-Length", content_length) for key, value in headers.to_wsgi_list(): result["HTTP_%s" % key.upper().replace("-", "_")] = value if self.environ_overrides: result.update(self.environ_overrides) return result def get_request(self, cls=None): """Returns a request with the data. If the request class is not specified :attr:`request_class` is used. :param cls: The request wrapper to use. """ if cls is None: cls = self.request_class return cls(self.get_environ()) class ClientRedirectError(Exception): """If a redirect loop is detected when using follow_redirects=True with the :cls:`Client`, then this exception is raised. """ class Client(object): """This class allows you to send requests to a wrapped application. The response wrapper can be a class or factory function that takes three arguments: app_iter, status and headers. The default response wrapper just returns a tuple. Example:: class ClientResponse(BaseResponse): ... client = Client(MyApplication(), response_wrapper=ClientResponse) The use_cookies parameter indicates whether cookies should be stored and sent for subsequent requests. This is True by default, but passing False will disable this behaviour. If you want to request some subdomain of your application you may set `allow_subdomain_redirects` to `True` as if not no external redirects are allowed. .. versionadded:: 0.5 `use_cookies` is new in this version. Older versions did not provide builtin cookie support. .. versionadded:: 0.14 The `mimetype` parameter was added. .. versionadded:: 0.15 The ``json`` parameter. 
""" def __init__( self, application, response_wrapper=None, use_cookies=True, allow_subdomain_redirects=False, ): self.application = application self.response_wrapper = response_wrapper if use_cookies: self.cookie_jar = _TestCookieJar() else: self.cookie_jar = None self.allow_subdomain_redirects = allow_subdomain_redirects def set_cookie( self, server_name, key, value="", max_age=None, expires=None, path="/", domain=None, secure=None, httponly=False, samesite=None, charset="utf-8", ): """Sets a cookie in the client's cookie jar. The server name is required and has to match the one that is also passed to the open call. """ assert self.cookie_jar is not None, "cookies disabled" header = dump_cookie( key, value, max_age, expires, path, domain, secure, httponly, charset, samesite=samesite, ) environ = create_environ(path, base_url="http://" + server_name) headers = [("Set-Cookie", header)] self.cookie_jar.extract_wsgi(environ, headers) def delete_cookie(self, server_name, key, path="/", domain=None): """Deletes a cookie in the test client.""" self.set_cookie( server_name, key, expires=0, max_age=0, path=path, domain=domain ) def run_wsgi_app(self, environ, buffered=False): """Runs the wrapped WSGI app with the given environment.""" if self.cookie_jar is not None: self.cookie_jar.inject_wsgi(environ) rv = run_wsgi_app(self.application, environ, buffered=buffered) if self.cookie_jar is not None: self.cookie_jar.extract_wsgi(environ, rv[2]) return rv def resolve_redirect(self, response, new_location, environ, buffered=False): """Perform a new request to the location given by the redirect response to the previous request. """ scheme, netloc, path, qs, anchor = url_parse(new_location) builder = EnvironBuilder.from_environ(environ, query_string=qs) to_name_parts = netloc.split(":", 1)[0].split(".") from_name_parts = builder.server_name.split(".") if to_name_parts != [""]: # The new location has a host, use it for the base URL. builder.url_scheme = scheme builder.host = netloc else: # A local redirect with autocorrect_location_header=False # doesn't have a host, so use the request's host. to_name_parts = from_name_parts # Explain why a redirect to a different server name won't be followed. if to_name_parts != from_name_parts: if to_name_parts[-len(from_name_parts) :] == from_name_parts: if not self.allow_subdomain_redirects: raise RuntimeError("Following subdomain redirects is not enabled.") else: raise RuntimeError("Following external redirects is not supported.") path_parts = path.split("/") root_parts = builder.script_root.split("/") if path_parts[: len(root_parts)] == root_parts: # Strip the script root from the path. builder.path = path[len(builder.script_root) :] else: # The new location is not under the script root, so use the # whole path and clear the previous root. builder.path = path builder.script_root = "" status_code = int(response[1].split(None, 1)[0]) # Only 307 and 308 preserve all of the original request. if status_code not in {307, 308}: # HEAD is preserved, everything else becomes GET. if builder.method != "HEAD": builder.method = "GET" # Clear the body and the headers that describe it. builder.input_stream = None builder.content_type = None builder.content_length = None builder.headers.pop("Transfer-Encoding", None) # Disable the response wrapper while handling redirects. Not # thread safe, but the client should not be shared anyway. 
old_response_wrapper = self.response_wrapper self.response_wrapper = None try: return self.open(builder, as_tuple=True, buffered=buffered) finally: self.response_wrapper = old_response_wrapper def open(self, *args, **kwargs): """Takes the same arguments as the :class:`EnvironBuilder` class with some additions: You can provide a :class:`EnvironBuilder` or a WSGI environment as only argument instead of the :class:`EnvironBuilder` arguments and two optional keyword arguments (`as_tuple`, `buffered`) that change the type of the return value or the way the application is executed. .. versionchanged:: 0.5 If a dict is provided as file in the dict for the `data` parameter the content type has to be called `content_type` now instead of `mimetype`. This change was made for consistency with :class:`werkzeug.FileWrapper`. The `follow_redirects` parameter was added to :func:`open`. Additional parameters: :param as_tuple: Returns a tuple in the form ``(environ, result)`` :param buffered: Set this to True to buffer the application run. This will automatically close the application for you as well. :param follow_redirects: Set this to True if the `Client` should follow HTTP redirects. """ as_tuple = kwargs.pop("as_tuple", False) buffered = kwargs.pop("buffered", False) follow_redirects = kwargs.pop("follow_redirects", False) environ = None if not kwargs and len(args) == 1: if isinstance(args[0], EnvironBuilder): environ = args[0].get_environ() elif isinstance(args[0], dict): environ = args[0] if environ is None: builder = EnvironBuilder(*args, **kwargs) try: environ = builder.get_environ() finally: builder.close() response = self.run_wsgi_app(environ.copy(), buffered=buffered) # handle redirects redirect_chain = [] while 1: status_code = int(response[1].split(None, 1)[0]) if ( status_code not in {301, 302, 303, 305, 307, 308} or not follow_redirects ): break # Exhaust intermediate response bodies to ensure middleware # that returns an iterator runs any cleanup code. 
if not buffered: for _ in response[0]: pass new_location = response[2]["location"] new_redirect_entry = (new_location, status_code) if new_redirect_entry in redirect_chain: raise ClientRedirectError("loop detected") redirect_chain.append(new_redirect_entry) environ, response = self.resolve_redirect( response, new_location, environ, buffered=buffered ) if self.response_wrapper is not None: response = self.response_wrapper(*response) if as_tuple: return environ, response return response def get(self, *args, **kw): """Like open but method is enforced to GET.""" kw["method"] = "GET" return self.open(*args, **kw) def patch(self, *args, **kw): """Like open but method is enforced to PATCH.""" kw["method"] = "PATCH" return self.open(*args, **kw) def post(self, *args, **kw): """Like open but method is enforced to POST.""" kw["method"] = "POST" return self.open(*args, **kw) def head(self, *args, **kw): """Like open but method is enforced to HEAD.""" kw["method"] = "HEAD" return self.open(*args, **kw) def put(self, *args, **kw): """Like open but method is enforced to PUT.""" kw["method"] = "PUT" return self.open(*args, **kw) def delete(self, *args, **kw): """Like open but method is enforced to DELETE.""" kw["method"] = "DELETE" return self.open(*args, **kw) def options(self, *args, **kw): """Like open but method is enforced to OPTIONS.""" kw["method"] = "OPTIONS" return self.open(*args, **kw) def trace(self, *args, **kw): """Like open but method is enforced to TRACE.""" kw["method"] = "TRACE" return self.open(*args, **kw) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.application) def create_environ(*args, **kwargs): """Create a new WSGI environ dict based on the values passed. The first parameter should be the path of the request which defaults to '/'. The second one can either be an absolute path (in that case the host is localhost:80) or a full path to the request with scheme, netloc port and the path to the script. This accepts the same arguments as the :class:`EnvironBuilder` constructor. .. versionchanged:: 0.5 This function is now a thin wrapper over :class:`EnvironBuilder` which was added in 0.5. The `headers`, `environ_base`, `environ_overrides` and `charset` parameters were added. """ builder = EnvironBuilder(*args, **kwargs) try: return builder.get_environ() finally: builder.close() def run_wsgi_app(app, environ, buffered=False): """Return a tuple in the form (app_iter, status, headers) of the application output. This works best if you pass it an application that returns an iterator all the time. Sometimes applications may use the `write()` callable returned by the `start_response` function. This tries to resolve such edge cases automatically. But if you don't get the expected output you should set `buffered` to `True` which enforces buffering. If passed an invalid WSGI application the behavior of this function is undefined. Never pass non-conforming WSGI applications to this function. :param app: the application to execute. :param buffered: set to `True` to enforce buffering. 
:return: tuple in the form ``(app_iter, status, headers)`` """ environ = _get_environ(environ) response = [] buffer = [] def start_response(status, headers, exc_info=None): if exc_info is not None: reraise(*exc_info) response[:] = [status, headers] return buffer.append app_rv = app(environ, start_response) close_func = getattr(app_rv, "close", None) app_iter = iter(app_rv) # when buffering we emit the close call early and convert the # application iterator into a regular list if buffered: try: app_iter = list(app_iter) finally: if close_func is not None: close_func() # otherwise we iterate the application iter until we have a response, chain # the already received data with the already collected data and wrap it in # a new `ClosingIterator` if we need to restore a `close` callable from the # original return value. else: for item in app_iter: buffer.append(item) if response: break if buffer: app_iter = chain(buffer, app_iter) if close_func is not None and app_iter is not app_rv: app_iter = ClosingIterator(app_iter, close_func) return app_iter, response[0], Headers(response[1])
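
# ----------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of werkzeug).
# Without a response_wrapper, Client returns the raw (app_iter, status,
# headers) tuple produced by run_wsgi_app(). The tiny WSGI app below is
# an example stand-in.
if __name__ == "__main__":

    def demo_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello from " + environ["PATH_INFO"].encode("ascii")]

    client = Client(demo_app)
    app_iter, status, headers = client.get("/ping")
    assert status == "200 OK"
    assert b"".join(app_iter) == b"hello from /ping"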
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/formparser.py
# -*- coding: utf-8 -*- """ werkzeug.formparser ~~~~~~~~~~~~~~~~~~~ This module implements the form parsing. It supports url-encoded forms as well as non-nested multipart uploads. :copyright: 2007 Pallets :license: BSD-3-Clause """ import codecs import re from functools import update_wrapper from itertools import chain from itertools import repeat from itertools import tee from . import exceptions from ._compat import BytesIO from ._compat import text_type from ._compat import to_native from .datastructures import FileStorage from .datastructures import Headers from .datastructures import MultiDict from .http import parse_options_header from .urls import url_decode_stream from .wsgi import get_content_length from .wsgi import get_input_stream from .wsgi import make_line_iter # there are some platforms where SpooledTemporaryFile is not available. # In that case we need to provide a fallback. try: from tempfile import SpooledTemporaryFile except ImportError: from tempfile import TemporaryFile SpooledTemporaryFile = None #: an iterator that yields empty strings _empty_string_iter = repeat("") #: a regular expression for multipart boundaries _multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$") #: supported http encodings that are also available in python we support #: for multipart messages. _supported_multipart_encodings = frozenset(["base64", "quoted-printable"]) def default_stream_factory( total_content_length, filename, content_type, content_length=None ): """The stream factory that is used per default.""" max_size = 1024 * 500 if SpooledTemporaryFile is not None: return SpooledTemporaryFile(max_size=max_size, mode="wb+") if total_content_length is None or total_content_length > max_size: return TemporaryFile("wb+") return BytesIO() def parse_form_data( environ, stream_factory=None, charset="utf-8", errors="replace", max_form_memory_size=None, max_content_length=None, cls=None, silent=True, ): """Parse the form data in the environ and return it as tuple in the form ``(stream, form, files)``. You should only call this method if the transport method is `POST`, `PUT`, or `PATCH`. If the mimetype of the data transmitted is `multipart/form-data` the files multidict will be filled with `FileStorage` objects. If the mimetype is unknown the input stream is wrapped and returned as first argument, else the stream is empty. This is a shortcut for the common usage of :class:`FormDataParser`. Have a look at :ref:`dealing-with-request-data` for more details. .. versionadded:: 0.5 The `max_form_memory_size`, `max_content_length` and `cls` parameters were added. .. versionadded:: 0.5.1 The optional `silent` flag was added. :param environ: the WSGI environment to be used for parsing. :param stream_factory: An optional callable that returns a new read and writeable file descriptor. This callable works the same as :meth:`~BaseResponse._get_file_stream`. :param charset: The character set for URL and url encoded form data. :param errors: The encoding error behavior. :param max_form_memory_size: the maximum number of bytes to be accepted for in-memory stored form data. If the data exceeds the value specified an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param max_content_length: If this is provided and the transmitted data is longer than this value an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. 
:param silent: If set to False parsing errors will not be caught. :return: A tuple in the form ``(stream, form, files)``. """ return FormDataParser( stream_factory, charset, errors, max_form_memory_size, max_content_length, cls, silent, ).parse_from_environ(environ) def exhaust_stream(f): """Helper decorator for methods that exhausts the stream on return.""" def wrapper(self, stream, *args, **kwargs): try: return f(self, stream, *args, **kwargs) finally: exhaust = getattr(stream, "exhaust", None) if exhaust is not None: exhaust() else: while 1: chunk = stream.read(1024 * 64) if not chunk: break return update_wrapper(wrapper, f) class FormDataParser(object): """This class implements parsing of form data for Werkzeug. By itself it can parse multipart and url encoded form data. It can be subclassed and extended but for most mimetypes it is a better idea to use the untouched stream and expose it as separate attributes on a request object. .. versionadded:: 0.8 :param stream_factory: An optional callable that returns a new read and writeable file descriptor. This callable works the same as :meth:`~BaseResponse._get_file_stream`. :param charset: The character set for URL and url encoded form data. :param errors: The encoding error behavior. :param max_form_memory_size: the maximum number of bytes to be accepted for in-memory stored form data. If the data exceeds the value specified an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param max_content_length: If this is provided and the transmitted data is longer than this value an :exc:`~exceptions.RequestEntityTooLarge` exception is raised. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param silent: If set to False parsing errors will not be caught. """ def __init__( self, stream_factory=None, charset="utf-8", errors="replace", max_form_memory_size=None, max_content_length=None, cls=None, silent=True, ): if stream_factory is None: stream_factory = default_stream_factory self.stream_factory = stream_factory self.charset = charset self.errors = errors self.max_form_memory_size = max_form_memory_size self.max_content_length = max_content_length if cls is None: cls = MultiDict self.cls = cls self.silent = silent def get_parse_func(self, mimetype, options): return self.parse_functions.get(mimetype) def parse_from_environ(self, environ): """Parses the information from the environment as form data. :param environ: the WSGI environment to be used for parsing. :return: A tuple in the form ``(stream, form, files)``. """ content_type = environ.get("CONTENT_TYPE", "") content_length = get_content_length(environ) mimetype, options = parse_options_header(content_type) return self.parse(get_input_stream(environ), mimetype, content_length, options) def parse(self, stream, mimetype, content_length, options=None): """Parses the information from the given stream, mimetype, content length and mimetype parameters. :param stream: an input stream :param mimetype: the mimetype of the data :param content_length: the content length of the incoming data :param options: optional mimetype parameters (used for the multipart boundary for instance) :return: A tuple in the form ``(stream, form, files)``. 
""" if ( self.max_content_length is not None and content_length is not None and content_length > self.max_content_length ): raise exceptions.RequestEntityTooLarge() if options is None: options = {} parse_func = self.get_parse_func(mimetype, options) if parse_func is not None: try: return parse_func(self, stream, mimetype, content_length, options) except ValueError: if not self.silent: raise return stream, self.cls(), self.cls() @exhaust_stream def _parse_multipart(self, stream, mimetype, content_length, options): parser = MultiPartParser( self.stream_factory, self.charset, self.errors, max_form_memory_size=self.max_form_memory_size, cls=self.cls, ) boundary = options.get("boundary") if boundary is None: raise ValueError("Missing boundary") if isinstance(boundary, text_type): boundary = boundary.encode("ascii") form, files = parser.parse(stream, boundary, content_length) return stream, form, files @exhaust_stream def _parse_urlencoded(self, stream, mimetype, content_length, options): if ( self.max_form_memory_size is not None and content_length is not None and content_length > self.max_form_memory_size ): raise exceptions.RequestEntityTooLarge() form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls) return stream, form, self.cls() #: mapping of mimetypes to parsing functions parse_functions = { "multipart/form-data": _parse_multipart, "application/x-www-form-urlencoded": _parse_urlencoded, "application/x-url-encoded": _parse_urlencoded, } def is_valid_multipart_boundary(boundary): """Checks if the string given is a valid multipart boundary.""" return _multipart_boundary_re.match(boundary) is not None def _line_parse(line): """Removes line ending characters and returns a tuple (`stripped_line`, `is_terminated`). """ if line[-2:] in ["\r\n", b"\r\n"]: return line[:-2], True elif line[-1:] in ["\r", "\n", b"\r", b"\n"]: return line[:-1], True return line, False def parse_multipart_headers(iterable): """Parses multipart headers from an iterable that yields lines (including the trailing newline symbol). The iterable has to be newline terminated. The iterable will stop at the line where the headers ended so it can be further consumed. :param iterable: iterable of strings that are newline terminated """ result = [] for line in iterable: line = to_native(line) line, line_terminated = _line_parse(line) if not line_terminated: raise ValueError("unexpected end of line in multipart header") if not line: break elif line[0] in " \t" and result: key, value = result[-1] result[-1] = (key, value + "\n " + line[1:]) else: parts = line.split(":", 1) if len(parts) == 2: result.append((parts[0].strip(), parts[1].strip())) # we link the list to the headers, no need to create a copy, the # list was not shared anyways. 
return Headers(result) _begin_form = "begin_form" _begin_file = "begin_file" _cont = "cont" _end = "end" class MultiPartParser(object): def __init__( self, stream_factory=None, charset="utf-8", errors="replace", max_form_memory_size=None, cls=None, buffer_size=64 * 1024, ): self.charset = charset self.errors = errors self.max_form_memory_size = max_form_memory_size self.stream_factory = ( default_stream_factory if stream_factory is None else stream_factory ) self.cls = MultiDict if cls is None else cls # make sure the buffer size is divisible by four so that we can base64 # decode chunk by chunk assert buffer_size % 4 == 0, "buffer size has to be divisible by 4" # also the buffer size has to be at least 1024 bytes long or long headers # will freak out the system assert buffer_size >= 1024, "buffer size has to be at least 1KB" self.buffer_size = buffer_size def _fix_ie_filename(self, filename): """Internet Explorer 6 transmits the full file name if a file is uploaded. This function strips the full path if it thinks the filename is Windows-like absolute. """ if filename[1:3] == ":\\" or filename[:2] == "\\\\": return filename.split("\\")[-1] return filename def _find_terminator(self, iterator): """The terminator might have some additional newlines before it. There is at least one application that sends additional newlines before headers (the python setuptools package). """ for line in iterator: if not line: break line = line.strip() if line: return line return b"" def fail(self, message): raise ValueError(message) def get_part_encoding(self, headers): transfer_encoding = headers.get("content-transfer-encoding") if ( transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings ): return transfer_encoding def get_part_charset(self, headers): # Figure out input charset for current part content_type = headers.get("content-type") if content_type: mimetype, ct_params = parse_options_header(content_type) return ct_params.get("charset", self.charset) return self.charset def start_file_streaming(self, filename, headers, total_content_length): if isinstance(filename, bytes): filename = filename.decode(self.charset, self.errors) filename = self._fix_ie_filename(filename) content_type = headers.get("content-type") try: content_length = int(headers["content-length"]) except (KeyError, ValueError): content_length = 0 container = self.stream_factory( total_content_length=total_content_length, filename=filename, content_type=content_type, content_length=content_length, ) return filename, container def in_memory_threshold_reached(self, bytes): raise exceptions.RequestEntityTooLarge() def validate_boundary(self, boundary): if not boundary: self.fail("Missing boundary") if not is_valid_multipart_boundary(boundary): self.fail("Invalid boundary: %s" % boundary) if len(boundary) > self.buffer_size: # pragma: no cover # this should never happen because we check for a minimum size # of 1024 and boundaries may not be longer than 200. The only # situation when this happens is for non debug builds where # the assert is skipped. 
self.fail("Boundary longer than buffer size") def parse_lines(self, file, boundary, content_length, cap_at_buffer=True): """Generate parts of ``('begin_form', (headers, name))`` ``('begin_file', (headers, name, filename))`` ``('cont', bytestring)`` ``('end', None)`` Always obeys the grammar parts = ( begin_form cont* end | begin_file cont* end )* """ next_part = b"--" + boundary last_part = next_part + b"--" iterator = chain( make_line_iter( file, limit=content_length, buffer_size=self.buffer_size, cap_at_buffer=cap_at_buffer, ), _empty_string_iter, ) terminator = self._find_terminator(iterator) if terminator == last_part: return elif terminator != next_part: self.fail("Expected boundary at start of multipart data") while terminator != last_part: headers = parse_multipart_headers(iterator) disposition = headers.get("content-disposition") if disposition is None: self.fail("Missing Content-Disposition header") disposition, extra = parse_options_header(disposition) transfer_encoding = self.get_part_encoding(headers) name = extra.get("name") filename = extra.get("filename") # if no content type is given we stream into memory. A list is # used as a temporary container. if filename is None: yield _begin_form, (headers, name) # otherwise we parse the rest of the headers and ask the stream # factory for something we can write in. else: yield _begin_file, (headers, name, filename) buf = b"" for line in iterator: if not line: self.fail("unexpected end of stream") if line[:2] == b"--": terminator = line.rstrip() if terminator in (next_part, last_part): break if transfer_encoding is not None: if transfer_encoding == "base64": transfer_encoding = "base64_codec" try: line = codecs.decode(line, transfer_encoding) except Exception: self.fail("could not decode transfer encoded chunk") # we have something in the buffer from the last iteration. # this is usually a newline delimiter. if buf: yield _cont, buf buf = b"" # If the line ends with windows CRLF we write everything except # the last two bytes. In all other cases however we write # everything except the last byte. If it was a newline, that's # fine, otherwise it does not matter because we will write it # the next iteration. this ensures we do not write the # final newline into the stream. That way we do not have to # truncate the stream. However we do have to make sure that # if something else than a newline is in there we write it # out. if line[-2:] == b"\r\n": buf = b"\r\n" cutoff = -2 else: buf = line[-1:] cutoff = -1 yield _cont, line[:cutoff] else: # pragma: no cover raise ValueError("unexpected end of part") # if we have a leftover in the buffer that is not a newline # character we have to flush it, otherwise we will chop of # certain values. if buf not in (b"", b"\r", b"\n", b"\r\n"): yield _cont, buf yield _end, None def parse_parts(self, file, boundary, content_length): """Generate ``('file', (name, val))`` and ``('form', (name, val))`` parts. 
""" in_memory = 0 for ellt, ell in self.parse_lines(file, boundary, content_length): if ellt == _begin_file: headers, name, filename = ell is_file = True guard_memory = False filename, container = self.start_file_streaming( filename, headers, content_length ) _write = container.write elif ellt == _begin_form: headers, name = ell is_file = False container = [] _write = container.append guard_memory = self.max_form_memory_size is not None elif ellt == _cont: _write(ell) # if we write into memory and there is a memory size limit we # count the number of bytes in memory and raise an exception if # there is too much data in memory. if guard_memory: in_memory += len(ell) if in_memory > self.max_form_memory_size: self.in_memory_threshold_reached(in_memory) elif ellt == _end: if is_file: container.seek(0) yield ( "file", (name, FileStorage(container, filename, name, headers=headers)), ) else: part_charset = self.get_part_charset(headers) yield ( "form", (name, b"".join(container).decode(part_charset, self.errors)), ) def parse(self, file, boundary, content_length): formstream, filestream = tee( self.parse_parts(file, boundary, content_length), 2 ) form = (p[1] for p in formstream if p[0] == "form") files = (p[1] for p in filestream if p[0] == "file") return self.cls(form), self.cls(files)
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/posixemulation.py
# -*- coding: utf-8 -*- r""" werkzeug.posixemulation ~~~~~~~~~~~~~~~~~~~~~~~ Provides a POSIX emulation for some features that are relevant to web applications. The main purpose is to simplify support for systems such as Windows NT that are not 100% POSIX compatible. Currently this only implements a :func:`rename` function that follows POSIX semantics. Eg: if the target file already exists it will be replaced without asking. This module was introduced in 0.6.1 and is not a public interface. It might become one in later versions of Werkzeug. :copyright: 2007 Pallets :license: BSD-3-Clause """ import errno import os import random import sys import time from ._compat import to_unicode from .filesystem import get_filesystem_encoding can_rename_open_file = False if os.name == "nt": try: import ctypes _MOVEFILE_REPLACE_EXISTING = 0x1 _MOVEFILE_WRITE_THROUGH = 0x8 _MoveFileEx = ctypes.windll.kernel32.MoveFileExW def _rename(src, dst): src = to_unicode(src, get_filesystem_encoding()) dst = to_unicode(dst, get_filesystem_encoding()) if _rename_atomic(src, dst): return True retry = 0 rv = False while not rv and retry < 100: rv = _MoveFileEx( src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH ) if not rv: time.sleep(0.001) retry += 1 return rv # new in Vista and Windows Server 2008 _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW _CloseHandle = ctypes.windll.kernel32.CloseHandle can_rename_open_file = True def _rename_atomic(src, dst): ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename") if ta == -1: return False try: retry = 0 rv = False while not rv and retry < 100: rv = _MoveFileTransacted( src, dst, None, None, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH, ta, ) if rv: rv = _CommitTransaction(ta) break else: time.sleep(0.001) retry += 1 return rv finally: _CloseHandle(ta) except Exception: def _rename(src, dst): return False def _rename_atomic(src, dst): return False def rename(src, dst): # Try atomic or pseudo-atomic rename if _rename(src, dst): return # Fall back to "move away and replace" try: os.rename(src, dst) except OSError as e: if e.errno != errno.EEXIST: raise old = "%s-%08x" % (dst, random.randint(0, sys.maxsize)) os.rename(dst, old) os.rename(src, dst) try: os.unlink(old) except Exception: pass else: rename = os.rename can_rename_open_file = True
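
# ----------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of werkzeug).
# Demonstrates the POSIX rename semantics promised above: the target is
# replaced even if it already exists. Paths live in a throwaway temp dir.
if __name__ == "__main__":
    import shutil
    import tempfile

    tmp = tempfile.mkdtemp()
    try:
        src = os.path.join(tmp, "incoming.txt")
        dst = os.path.join(tmp, "current.txt")
        for path, text in ((src, "new contents"), (dst, "old contents")):
            with open(path, "w") as f:
                f.write(text)
        rename(src, dst)  # silently overwrites dst, POSIX-style
        with open(dst) as f:
            assert f.read() == "new contents"
    finally:
        shutil.rmtree(tmp)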
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/utils.py
# -*- coding: utf-8 -*- """ werkzeug.utils ~~~~~~~~~~~~~~ This module implements various utilities for WSGI applications. Most of them are used by the request and response wrappers but especially for middleware development it makes sense to use them without the wrappers. :copyright: 2007 Pallets :license: BSD-3-Clause """ import codecs import os import pkgutil import re import sys from ._compat import iteritems from ._compat import PY2 from ._compat import reraise from ._compat import string_types from ._compat import text_type from ._compat import unichr from ._internal import _DictAccessorProperty from ._internal import _missing from ._internal import _parse_signature try: from html.entities import name2codepoint except ImportError: from htmlentitydefs import name2codepoint _format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2)) _entity_re = re.compile(r"&([^;]+);") _filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]") _windows_device_files = ( "CON", "AUX", "COM1", "COM2", "COM3", "COM4", "LPT1", "LPT2", "LPT3", "PRN", "NUL", ) class cached_property(property): """A decorator that converts a function into a lazy property. The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value:: class Foo(object): @cached_property def foo(self): # calculate something important here return 42 The class has to have a `__dict__` in order for this property to work. """ # implementation detail: A subclass of python's builtin property # decorator, we override __get__ to check for a cached value. If one # chooses to invoke __get__ by hand the property will still work as # expected because the lookup logic is replicated in __get__ for # manual invocation. def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func def __set__(self, obj, value): obj.__dict__[self.__name__] = value def __get__(self, obj, type=None): if obj is None: return self value = obj.__dict__.get(self.__name__, _missing) if value is _missing: value = self.func(obj) obj.__dict__[self.__name__] = value return value def invalidate_cached_property(obj, name): """Invalidates the cache for a :class:`cached_property`: >>> class Test(object): ... @cached_property ... def magic_number(self): ... print("recalculating...") ... return 42 ... >>> var = Test() >>> var.magic_number recalculating... 42 >>> var.magic_number 42 >>> invalidate_cached_property(var, "magic_number") >>> var.magic_number recalculating... 42 You must pass the name of the cached property as the second argument. """ if not isinstance(getattr(obj.__class__, name, None), cached_property): raise TypeError( "Attribute {} of object {} is not a cached_property, " "cannot be invalidated".format(name, obj) ) obj.__dict__[name] = _missing class environ_property(_DictAccessorProperty): """Maps request attributes to environment variables. This works not only for the Werzeug request object, but also any other class with an environ attribute: >>> class Test(object): ... environ = {'key': 'value'} ... test = environ_property('key') >>> var = Test() >>> var.test 'value' If you pass it a second value it's used as default if the key does not exist, the third one can be a converter that takes a value and converts it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value is used. If no default value is provided `None` is used. 
Per default the property is read only. You have to explicitly enable it by passing ``read_only=False`` to the constructor. """ read_only = True def lookup(self, obj): return obj.environ class header_property(_DictAccessorProperty): """Like `environ_property` but for headers.""" def lookup(self, obj): return obj.headers class HTMLBuilder(object): """Helper object for HTML generation. Per default there are two instances of that class. The `html` one, and the `xhtml` one for those two dialects. The class uses keyword parameters and positional parameters to generate small snippets of HTML. Keyword parameters are converted to XML/SGML attributes, positional arguments are used as children. Because Python accepts positional arguments before keyword arguments it's a good idea to use a list with the star-syntax for some children: >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ', ... html.a('bar', href='bar.html')]) u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>' This class works around some browser limitations and can not be used for arbitrary SGML/XML generation. For that purpose lxml and similar libraries exist. Calling the builder escapes the string passed: >>> html.p(html("<foo>")) u'<p>&lt;foo&gt;</p>' """ _entity_re = re.compile(r"&([^;]+);") _entities = name2codepoint.copy() _entities["apos"] = 39 _empty_elements = { "area", "base", "basefont", "br", "col", "command", "embed", "frame", "hr", "img", "input", "keygen", "isindex", "link", "meta", "param", "source", "wbr", } _boolean_attributes = { "selected", "checked", "compact", "declare", "defer", "disabled", "ismap", "multiple", "nohref", "noresize", "noshade", "nowrap", } _plaintext_elements = {"textarea"} _c_like_cdata = {"script", "style"} def __init__(self, dialect): self._dialect = dialect def __call__(self, s): return escape(s) def __getattr__(self, tag): if tag[:2] == "__": raise AttributeError(tag) def proxy(*children, **arguments): buffer = "<" + tag for key, value in iteritems(arguments): if value is None: continue if key[-1] == "_": key = key[:-1] if key in self._boolean_attributes: if not value: continue if self._dialect == "xhtml": value = '="' + key + '"' else: value = "" else: value = '="' + escape(value) + '"' buffer += " " + key + value if not children and tag in self._empty_elements: if self._dialect == "xhtml": buffer += " />" else: buffer += ">" return buffer buffer += ">" children_as_string = "".join( [text_type(x) for x in children if x is not None] ) if children_as_string: if tag in self._plaintext_elements: children_as_string = escape(children_as_string) elif tag in self._c_like_cdata and self._dialect == "xhtml": children_as_string = ( "/*<![CDATA[*/" + children_as_string + "/*]]>*/" ) buffer += children_as_string + "</" + tag + ">" return buffer return proxy def __repr__(self): return "<%s for %r>" % (self.__class__.__name__, self._dialect) html = HTMLBuilder("html") xhtml = HTMLBuilder("xhtml") # https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in # https://www.iana.org/assignments/media-types/media-types.xhtml # Types listed in the XDG mime info that have a charset in the IANA registration. _charset_mimetypes = { "application/ecmascript", "application/javascript", "application/sql", "application/xml", "application/xml-dtd", "application/xml-external-parsed-entity", } def get_content_type(mimetype, charset): """Returns the full content type string with charset for a mimetype. 
If the mimetype represents text, the charset parameter will be appended, otherwise the mimetype is returned unchanged. :param mimetype: The mimetype to be used as content type. :param charset: The charset to be appended for text mimetypes. :return: The content type. .. versionchanged:: 0.15 Any type that ends with ``+xml`` gets a charset, not just those that start with ``application/``. Known text types such as ``application/javascript`` are also given charsets. """ if ( mimetype.startswith("text/") or mimetype in _charset_mimetypes or mimetype.endswith("+xml") ): mimetype += "; charset=" + charset return mimetype def detect_utf_encoding(data): """Detect which UTF encoding was used to encode the given bytes. The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big or little endian. Some editors or libraries may prepend a BOM. :internal: :param data: Bytes in unknown UTF encoding. :return: UTF encoding name .. versionadded:: 0.15 """ head = data[:4] if head[:3] == codecs.BOM_UTF8: return "utf-8-sig" if b"\x00" not in head: return "utf-8" if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): return "utf-32" if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): return "utf-16" if len(head) == 4: if head[:3] == b"\x00\x00\x00": return "utf-32-be" if head[::2] == b"\x00\x00": return "utf-16-be" if head[1:] == b"\x00\x00\x00": return "utf-32-le" if head[1::2] == b"\x00\x00": return "utf-16-le" if len(head) == 2: return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le" return "utf-8" def format_string(string, context): """String-template format a string: >>> format_string('$foo and ${foo}s', dict(foo=42)) '42 and 42s' This does not do any attribute lookup etc. For more advanced string formattings have a look at the `werkzeug.template` module. :param string: the format string. :param context: a dict with the variables to insert. """ def lookup_arg(match): x = context[match.group(1) or match.group(2)] if not isinstance(x, string_types): x = type(string)(x) return x return _format_re.sub(lookup_arg, string) def secure_filename(filename): r"""Pass it a filename and it will return a secure version of it. This filename can then safely be stored on a regular file system and passed to :func:`os.path.join`. The filename returned is an ASCII only string for maximum portability. On windows systems the function also makes sure that the file is not named after one of the special device files. >>> secure_filename("My cool movie.mov") 'My_cool_movie.mov' >>> secure_filename("../../../etc/passwd") 'etc_passwd' >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') 'i_contain_cool_umlauts.txt' The function might return an empty filename. It's your responsibility to ensure that the filename is unique and that you abort or generate a random filename if the function returned an empty one. .. versionadded:: 0.5 :param filename: the filename to secure """ if isinstance(filename, text_type): from unicodedata import normalize filename = normalize("NFKD", filename).encode("ascii", "ignore") if not PY2: filename = filename.decode("ascii") for sep in os.path.sep, os.path.altsep: if sep: filename = filename.replace(sep, " ") filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip( "._" ) # on nt a couple of special files are present in each folder. We # have to ensure that the target file is not such a filename. 
In # this case we prepend an underline if ( os.name == "nt" and filename and filename.split(".")[0].upper() in _windows_device_files ): filename = "_" + filename return filename def escape(s): """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. There is a special handling for `None` which escapes to an empty string. .. versionchanged:: 0.9 `quote` is now implicitly on. :param s: the string to escape. :param quote: ignored. """ if s is None: return "" elif hasattr(s, "__html__"): return text_type(s.__html__()) if not isinstance(s, string_types): s = text_type(s) return ( s.replace("&", "&amp;") .replace("<", "&lt;") .replace(">", "&gt;") .replace('"', "&quot;") ) def unescape(s): """The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape. """ def handle_match(m): name = m.group(1) if name in HTMLBuilder._entities: return unichr(HTMLBuilder._entities[name]) try: if name[:2] in ("#x", "#X"): return unichr(int(name[2:], 16)) elif name.startswith("#"): return unichr(int(name[1:])) except ValueError: pass return u"" return _entity_re.sub(handle_match, s) def redirect(location, code=302, Response=None): """Returns a response object (a WSGI application) that, if called, redirects the client to the target location. Supported codes are 301, 302, 303, 305, 307, and 308. 300 is not supported because it's not a real redirect and 304 because it's the answer for a request with a request with defined If-Modified-Since headers. .. versionadded:: 0.6 The location can now be a unicode string that is encoded using the :func:`iri_to_uri` function. .. versionadded:: 0.10 The class used for the Response object can now be passed in. :param location: the location the response should redirect to. :param code: the redirect status code. defaults to 302. :param class Response: a Response class to use when instantiating a response. The default is :class:`werkzeug.wrappers.Response` if unspecified. """ if Response is None: from .wrappers import Response display_location = escape(location) if isinstance(location, text_type): # Safe conversion is necessary here as we might redirect # to a broken URI scheme (for instance itms-services). from .urls import iri_to_uri location = iri_to_uri(location, safe_conversion=True) response = Response( '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' "<title>Redirecting...</title>\n" "<h1>Redirecting...</h1>\n" "<p>You should be redirected automatically to target URL: " '<a href="%s">%s</a>. If not click the link.' % (escape(location), display_location), code, mimetype="text/html", ) response.headers["Location"] = location return response def append_slash_redirect(environ, code=301): """Redirects to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect. """ new_path = environ["PATH_INFO"].strip("/") + "/" query_string = environ.get("QUERY_STRING") if query_string: new_path += "?" + query_string return redirect(new_path, code) def import_string(import_name, silent=False): """Imports an object based on a string. This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). 
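For example:

>>> import os.path
>>> import_string('os.path:join') is os.path.join
True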
If `silent` is True the return value will be `None` if the import fails. :param import_name: the dotted name for the object to import. :param silent: if set to `True` import errors are ignored and `None` is returned instead. :return: imported object """ # force the import name to automatically convert to strings # __import__ is not able to handle unicode strings in the fromlist # if the module is a package import_name = str(import_name).replace(":", ".") try: try: __import__(import_name) except ImportError: if "." not in import_name: raise else: return sys.modules[import_name] module_name, obj_name = import_name.rsplit(".", 1) module = __import__(module_name, globals(), locals(), [obj_name]) try: return getattr(module, obj_name) except AttributeError as e: raise ImportError(e) except ImportError as e: if not silent: reraise( ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2] ) def find_modules(import_path, include_packages=False, recursive=False): """Finds all the modules below a package. This can be useful to automatically import all views / controllers so that their metaclasses / function decorators have a chance to register themselves on the application. Packages are not returned unless `include_packages` is `True`. This can also recursively list modules but in that case it will import all the packages to get the correct load path of that module. :param import_path: the dotted name for the package to find child modules. :param include_packages: set to `True` if packages should be returned, too. :param recursive: set to `True` if recursion should happen. :return: generator """ module = import_string(import_path) path = getattr(module, "__path__", None) if path is None: raise ValueError("%r is not a package" % import_path) basename = module.__name__ + "." for _importer, modname, ispkg in pkgutil.iter_modules(path): modname = basename + modname if ispkg: if include_packages: yield modname if recursive: for item in find_modules(modname, include_packages, True): yield item else: yield modname def validate_arguments(func, args, kwargs, drop_extra=True): """Checks if the function accepts the arguments and keyword arguments. Returns a new ``(args, kwargs)`` tuple that can safely be passed to the function without causing a `TypeError` because the function signature is incompatible. If `drop_extra` is set to `True` (which is the default) any extra positional or keyword arguments are dropped automatically. The exception raised provides three attributes: `missing` A set of argument names that the function expected but where missing. `extra` A dict of keyword arguments that the function can not handle but where provided. `extra_positional` A list of values that where given by positional argument but the function cannot accept. This can be useful for decorators that forward user submitted data to a view function:: from werkzeug.utils import ArgumentValidationError, validate_arguments def sanitize(f): def proxy(request): data = request.values.to_dict() try: args, kwargs = validate_arguments(f, (request,), data) except ArgumentValidationError: raise BadRequest('The browser failed to transmit all ' 'the data expected.') return f(*args, **kwargs) return proxy :param func: the function the validation is performed against. :param args: a tuple of positional arguments. :param kwargs: a dict of keyword arguments. :param drop_extra: set to `False` if you don't want extra arguments to be silently dropped. :return: tuple in the form ``(args, kwargs)``. 
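A minimal sketch of the dropping behavior (the function and argument
names are illustrative)::

    def add(a, b=2):
        return a + b

    args, kwargs = validate_arguments(add, (1,), {'junk': 3})
    # 'junk' does not appear in add's signature, so with the
    # default drop_extra=True it is discarded silently instead
    # of raising ArgumentValidationError.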
""" parser = _parse_signature(func) args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] if missing: raise ArgumentValidationError(tuple(missing)) elif (extra or extra_positional) and not drop_extra: raise ArgumentValidationError(None, extra, extra_positional) return tuple(args), kwargs def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. """ ( args, kwargs, missing, extra, extra_positional, arg_spec, vararg_var, kwarg_var, ) = _parse_signature(func)(args, kwargs) values = {} for (name, _has_default, _default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError("too many positional arguments") if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError( "got multiple values for keyword argument " + repr(next(iter(multikw))) ) values[kwarg_var] = extra elif extra: raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) return values class ArgumentValidationError(ValueError): """Raised if :func:`validate_arguments` fails to validate""" def __init__(self, missing=None, extra=None, extra_positional=None): self.missing = set(missing or ()) self.extra = extra or {} self.extra_positional = extra_positional or [] ValueError.__init__( self, "function arguments invalid. (%d missing, %d additional)" % (len(self.missing), len(self.extra) + len(self.extra_positional)), ) class ImportStringError(ImportError): """Provides information about a failed :func:`import_string` attempt.""" #: String in dotted notation that failed to be imported. import_name = None #: Wrapped exception. exception = None def __init__(self, import_name, exception): self.import_name = import_name self.exception = exception msg = ( "import_string() failed for %r. Possible reasons are:\n\n" "- missing __init__.py in a package;\n" "- package or module path not included in sys.path;\n" "- duplicated package or module name taking precedence in " "sys.path;\n" "- missing module, class, function or variable;\n\n" "Debugged import:\n\n%s\n\n" "Original exception:\n\n%s: %s" ) name = "" tracked = [] for part in import_name.replace(":", ".").split("."): name += (name and ".") + part imported = import_string(name, silent=True) if imported: tracked.append((name, getattr(imported, "__file__", None))) else: track = ["- %r found in %r." % (n, i) for n, i in tracked] track.append("- %r not found." % name) msg = msg % ( import_name, "\n".join(track), exception.__class__.__name__, str(exception), ) break ImportError.__init__(self, msg) def __repr__(self): return "<%s(%r, %r)>" % ( self.__class__.__name__, self.import_name, self.exception, )
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/routing.py
# -*- coding: utf-8 -*- """ werkzeug.routing ~~~~~~~~~~~~~~~~ When it comes to combining multiple controller or view functions (however you want to call them) you need a dispatcher. A simple way would be applying regular expression tests on the ``PATH_INFO`` and calling registered callback functions that return the value then. This module implements a much more powerful system than simple regular expression matching because it can also convert values in the URLs and build URLs. Here a simple example that creates an URL map for an application with two subdomains (www and kb) and some URL rules: >>> m = Map([ ... # Static URLs ... Rule('/', endpoint='static/index'), ... Rule('/about', endpoint='static/about'), ... Rule('/help', endpoint='static/help'), ... # Knowledge Base ... Subdomain('kb', [ ... Rule('/', endpoint='kb/index'), ... Rule('/browse/', endpoint='kb/browse'), ... Rule('/browse/<int:id>/', endpoint='kb/browse'), ... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse') ... ]) ... ], default_subdomain='www') If the application doesn't use subdomains it's perfectly fine to not set the default subdomain and not use the `Subdomain` rule factory. The endpoint in the rules can be anything, for example import paths or unique identifiers. The WSGI application can use those endpoints to get the handler for that URL. It doesn't have to be a string at all but it's recommended. Now it's possible to create a URL adapter for one of the subdomains and build URLs: >>> c = m.bind('example.com') >>> c.build("kb/browse", dict(id=42)) 'http://kb.example.com/browse/42/' >>> c.build("kb/browse", dict()) 'http://kb.example.com/browse/' >>> c.build("kb/browse", dict(id=42, page=3)) 'http://kb.example.com/browse/42/3' >>> c.build("static/about") '/about' >>> c.build("static/index", force_external=True) 'http://www.example.com/' >>> c = m.bind('example.com', subdomain='kb') >>> c.build("static/about") 'http://www.example.com/about' The first argument to bind is the server name *without* the subdomain. Per default it will assume that the script is mounted on the root, but often that's not the case so you can provide the real mount point as second argument: >>> c = m.bind('example.com', '/applications/example') The third argument can be the subdomain, if not given the default subdomain is used. For more details about binding have a look at the documentation of the `MapAdapter`. And here is how you can match URLs: >>> c = m.bind('example.com') >>> c.match("/") ('static/index', {}) >>> c.match("/about") ('static/about', {}) >>> c = m.bind('example.com', '/', 'kb') >>> c.match("/") ('kb/index', {}) >>> c.match("/browse/42/23") ('kb/browse', {'id': 42, 'page': 23}) If matching fails you get a `NotFound` exception, if the rule thinks it's a good idea to redirect (for example because the URL was defined to have a slash at the end but the request was missing that slash) it will raise a `RequestRedirect` exception. Both are subclasses of the `HTTPException` so you can use those errors as responses in the application. If matching succeeded but the URL rule was incompatible to the given method (for example there were only rules for `GET` and `HEAD` and routing system tried to match a `POST` request) a `MethodNotAllowed` exception is raised. 
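Continuing the ``kb`` adapter from above, a branch URL requested
without its trailing slash raises the redirect:

>>> c.match("/browse")
Traceback (most recent call last):
  ...
RequestRedirect: http://kb.example.com/browse/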
:copyright: 2007 Pallets :license: BSD-3-Clause """ import ast import difflib import posixpath import re import uuid import warnings from pprint import pformat from threading import Lock from ._compat import implements_to_string from ._compat import iteritems from ._compat import itervalues from ._compat import native_string_result from ._compat import string_types from ._compat import text_type from ._compat import to_bytes from ._compat import to_unicode from ._compat import wsgi_decoding_dance from ._internal import _encode_idna from ._internal import _get_environ from .datastructures import ImmutableDict from .datastructures import MultiDict from .exceptions import BadHost from .exceptions import BadRequest from .exceptions import HTTPException from .exceptions import MethodNotAllowed from .exceptions import NotFound from .urls import _fast_url_quote from .urls import url_encode from .urls import url_join from .urls import url_quote from .utils import cached_property from .utils import format_string from .utils import redirect from .wsgi import get_host _rule_re = re.compile( r""" (?P<static>[^<]*) # static rule data < (?: (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name (?:\((?P<args>.*?)\))? # converter arguments \: # variable delimiter )? (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name > """, re.VERBOSE, ) _simple_rule_re = re.compile(r"<([^>]+)>") _converter_args_re = re.compile( r""" ((?P<name>\w+)\s*=\s*)? (?P<value> True|False| \d+.\d+| \d+.| \d+| [\w\d_.]+| [urUR]?(?P<stringval>"[^"]*?"|'[^']*') )\s*, """, re.VERBOSE | re.UNICODE, ) _PYTHON_CONSTANTS = {"None": None, "True": True, "False": False} def _pythonize(value): if value in _PYTHON_CONSTANTS: return _PYTHON_CONSTANTS[value] for convert in int, float: try: return convert(value) except ValueError: pass if value[:1] == value[-1:] and value[0] in "\"'": value = value[1:-1] return text_type(value) def parse_converter_args(argstr): argstr += "," args = [] kwargs = {} for item in _converter_args_re.finditer(argstr): value = item.group("stringval") if value is None: value = item.group("value") value = _pythonize(value) if not item.group("name"): args.append(value) else: name = item.group("name") kwargs[name] = value return tuple(args), kwargs def parse_rule(rule): """Parse a rule and return it as generator. Each iteration yields tuples in the form ``(converter, arguments, variable)``. If the converter is `None` it's a static url part, otherwise it's a dynamic one. :internal: """ pos = 0 end = len(rule) do_match = _rule_re.match used_names = set() while pos < end: m = do_match(rule, pos) if m is None: break data = m.groupdict() if data["static"]: yield None, None, data["static"] variable = data["variable"] converter = data["converter"] or "default" if variable in used_names: raise ValueError("variable name %r used twice." % variable) used_names.add(variable) yield converter, data["args"] or None, variable pos = m.end() if pos < end: remaining = rule[pos:] if ">" in remaining or "<" in remaining: raise ValueError("malformed url rule: %r" % rule) yield None, None, remaining class RoutingException(Exception): """Special exceptions that require the application to redirect, notifying about missing urls, etc. :internal: """ class RequestRedirect(HTTPException, RoutingException): """Raise if the map requests a redirect. This is for example the case if `strict_slashes` are activated and an url that requires a trailing slash. The attribute `new_url` contains the absolute destination url. 
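Because the exception doubles as a response object it can be returned
as the WSGI response directly; a minimal sketch, assuming ``url_map``
is a :class:`Map` instance::

    def application(environ, start_response):
        adapter = url_map.bind_to_environ(environ)
        try:
            endpoint, args = adapter.match()
        except RequestRedirect as e:
            # the exception is itself a WSGI application that
            # issues the redirect response
            return e(environ, start_response)
        ...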
""" code = 308 def __init__(self, new_url): RoutingException.__init__(self, new_url) self.new_url = new_url def get_response(self, environ=None): return redirect(self.new_url, self.code) class RequestPath(RoutingException): """Internal exception.""" __slots__ = ("path_info",) def __init__(self, path_info): self.path_info = path_info class RequestAliasRedirect(RoutingException): # noqa: B903 """This rule is an alias and wants to redirect to the canonical URL.""" def __init__(self, matched_values): self.matched_values = matched_values @implements_to_string class BuildError(RoutingException, LookupError): """Raised if the build system cannot find a URL for an endpoint with the values provided. """ def __init__(self, endpoint, values, method, adapter=None): LookupError.__init__(self, endpoint, values, method) self.endpoint = endpoint self.values = values self.method = method self.adapter = adapter @cached_property def suggested(self): return self.closest_rule(self.adapter) def closest_rule(self, adapter): def _score_rule(rule): return sum( [ 0.98 * difflib.SequenceMatcher( None, rule.endpoint, self.endpoint ).ratio(), 0.01 * bool(set(self.values or ()).issubset(rule.arguments)), 0.01 * bool(rule.methods and self.method in rule.methods), ] ) if adapter and adapter.map._rules: return max(adapter.map._rules, key=_score_rule) def __str__(self): message = [] message.append("Could not build url for endpoint %r" % self.endpoint) if self.method: message.append(" (%r)" % self.method) if self.values: message.append(" with values %r" % sorted(self.values.keys())) message.append(".") if self.suggested: if self.endpoint == self.suggested.endpoint: if self.method and self.method not in self.suggested.methods: message.append( " Did you mean to use methods %r?" % sorted(self.suggested.methods) ) missing_values = self.suggested.arguments.union( set(self.suggested.defaults or ()) ) - set(self.values.keys()) if missing_values: message.append( " Did you forget to specify values %r?" % sorted(missing_values) ) else: message.append(" Did you mean %r instead?" % self.suggested.endpoint) return u"".join(message) class WebsocketMismatch(BadRequest): """The only matched rule is either a WebSocket and the request is HTTP, or the rule is HTTP and the request is a WebSocket. """ class ValidationError(ValueError): """Validation error. If a rule converter raises this exception the rule does not match the current URL and the next URL is tried. """ class RuleFactory(object): """As soon as you have more complex URL setups it's a good idea to use rule factories to avoid repetitive tasks. Some of them are builtin, others can be added by subclassing `RuleFactory` and overriding `get_rules`. """ def get_rules(self, map): """Subclasses of `RuleFactory` have to override this method and return an iterable of rules.""" raise NotImplementedError() class Subdomain(RuleFactory): """All URLs provided by this factory have the subdomain set to a specific domain. For example if you want to use the subdomain for the current language this can be a good setup:: url_map = Map([ Rule('/', endpoint='#select_language'), Subdomain('<string(length=2):lang_code>', [ Rule('/', endpoint='index'), Rule('/about', endpoint='about'), Rule('/help', endpoint='help') ]) ]) All the rules except for the ``'#select_language'`` endpoint will now listen on a two letter long subdomain that holds the language code for the current request. 
""" def __init__(self, subdomain, rules): self.subdomain = subdomain self.rules = rules def get_rules(self, map): for rulefactory in self.rules: for rule in rulefactory.get_rules(map): rule = rule.empty() rule.subdomain = self.subdomain yield rule class Submount(RuleFactory): """Like `Subdomain` but prefixes the URL rule with a given string:: url_map = Map([ Rule('/', endpoint='index'), Submount('/blog', [ Rule('/', endpoint='blog/index'), Rule('/entry/<entry_slug>', endpoint='blog/show') ]) ]) Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``. """ def __init__(self, path, rules): self.path = path.rstrip("/") self.rules = rules def get_rules(self, map): for rulefactory in self.rules: for rule in rulefactory.get_rules(map): rule = rule.empty() rule.rule = self.path + rule.rule yield rule class EndpointPrefix(RuleFactory): """Prefixes all endpoints (which must be strings for this factory) with another string. This can be useful for sub applications:: url_map = Map([ Rule('/', endpoint='index'), EndpointPrefix('blog/', [Submount('/blog', [ Rule('/', endpoint='index'), Rule('/entry/<entry_slug>', endpoint='show') ])]) ]) """ def __init__(self, prefix, rules): self.prefix = prefix self.rules = rules def get_rules(self, map): for rulefactory in self.rules: for rule in rulefactory.get_rules(map): rule = rule.empty() rule.endpoint = self.prefix + rule.endpoint yield rule class RuleTemplate(object): """Returns copies of the rules wrapped and expands string templates in the endpoint, rule, defaults or subdomain sections. Here a small example for such a rule template:: from werkzeug.routing import Map, Rule, RuleTemplate resource = RuleTemplate([ Rule('/$name/', endpoint='$name.list'), Rule('/$name/<int:id>', endpoint='$name.show') ]) url_map = Map([resource(name='user'), resource(name='page')]) When a rule template is called the keyword arguments are used to replace the placeholders in all the string parameters. """ def __init__(self, rules): self.rules = list(rules) def __call__(self, *args, **kwargs): return RuleTemplateFactory(self.rules, dict(*args, **kwargs)) class RuleTemplateFactory(RuleFactory): """A factory that fills in template variables into rules. Used by `RuleTemplate` internally. :internal: """ def __init__(self, rules, context): self.rules = rules self.context = context def get_rules(self, map): for rulefactory in self.rules: for rule in rulefactory.get_rules(map): new_defaults = subdomain = None if rule.defaults: new_defaults = {} for key, value in iteritems(rule.defaults): if isinstance(value, string_types): value = format_string(value, self.context) new_defaults[key] = value if rule.subdomain is not None: subdomain = format_string(rule.subdomain, self.context) new_endpoint = rule.endpoint if isinstance(new_endpoint, string_types): new_endpoint = format_string(new_endpoint, self.context) yield Rule( format_string(rule.rule, self.context), new_defaults, subdomain, rule.methods, rule.build_only, new_endpoint, rule.strict_slashes, ) def _prefix_names(src): """ast parse and prefix names with `.` to avoid collision with user vars""" tree = ast.parse(src).body[0] if isinstance(tree, ast.Expr): tree = tree.value for node in ast.walk(tree): if isinstance(node, ast.Name): node.id = "." + node.id return tree _CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()" _IF_KWARGS_URL_ENCODE_CODE = """\ if kwargs: q = '?' 
params = self._encode_query_vars(kwargs) else: q = params = '' """ _IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE) _URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params")) @implements_to_string class Rule(RuleFactory): """A Rule represents one URL pattern. There are some options for `Rule` that change the way it behaves and are passed to the `Rule` constructor. Note that besides the rule-string all arguments *must* be keyword arguments in order to not break the application on Werkzeug upgrades. `string` Rule strings basically are just normal URL paths with placeholders in the format ``<converter(arguments):name>`` where the converter and the arguments are optional. If no converter is defined the `default` converter is used which means `string` in the normal configuration. URL rules that end with a slash are branch URLs, others are leaves. If you have `strict_slashes` enabled (which is the default), all branch URLs that are matched without a trailing slash will trigger a redirect to the same URL with the missing slash appended. The converters are defined on the `Map`. `endpoint` The endpoint for this rule. This can be anything. A reference to a function, a string, a number etc. The preferred way is using a string because the endpoint is used for URL generation. `defaults` An optional dict with defaults for other rules with the same endpoint. This is a bit tricky but useful if you want to have unique URLs:: url_map = Map([ Rule('/all/', defaults={'page': 1}, endpoint='all_entries'), Rule('/all/page/<int:page>', endpoint='all_entries') ]) If a user now visits ``http://example.com/all/page/1`` he will be redirected to ``http://example.com/all/``. If `redirect_defaults` is disabled on the `Map` instance this will only affect the URL generation. `subdomain` The subdomain rule string for this rule. If not specified the rule only matches for the `default_subdomain` of the map. If the map is not bound to a subdomain this feature is disabled. Can be useful if you want to have user profiles on different subdomains and all subdomains are forwarded to your application:: url_map = Map([ Rule('/', subdomain='<username>', endpoint='user/homepage'), Rule('/stats', subdomain='<username>', endpoint='user/stats') ]) `methods` A sequence of http methods this rule applies to. If not specified, all methods are allowed. For example this can be useful if you want different endpoints for `POST` and `GET`. If methods are defined and the path matches but the method matched against is not in this list or in the list of another rule for that path the error raised is of the type `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the list of methods and `HEAD` is not, `HEAD` is added automatically. `strict_slashes` Override the `Map` setting for `strict_slashes` only for this rule. If not specified the `Map` setting is used. `merge_slashes` Override :attr:`Map.merge_slashes` for this rule. `build_only` Set this to True and the rule will never match but will create a URL that can be build. This is useful if you have resources on a subdomain or folder that are not handled by the WSGI application (like static data) `redirect_to` If given this must be either a string or callable. 
In case of a callable it's called with the url adapter that triggered the match and the values of the URL as keyword arguments and has to return the target for the redirect, otherwise it has to be a string with placeholders in rule syntax:: def foo_with_slug(adapter, id): # ask the database for the slug for the old id. this of # course has nothing to do with werkzeug. return 'foo/' + Foo.get_slug_for_id(id) url_map = Map([ Rule('/foo/<slug>', endpoint='foo'), Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'), Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug) ]) When the rule is matched the routing system will raise a `RequestRedirect` exception with the target for the redirect. Keep in mind that the URL will be joined against the URL root of the script so don't use a leading slash on the target URL unless you really mean root of that domain. `alias` If enabled this rule serves as an alias for another rule with the same endpoint and arguments. `host` If provided and the URL map has host matching enabled this can be used to provide a match rule for the whole host. This also means that the subdomain feature is disabled. `websocket` If ``True``, this rule is only matches for WebSocket (``ws://``, ``wss://``) requests. By default, rules will only match for HTTP requests. .. versionadded:: 1.0 Added ``websocket``. .. versionadded:: 1.0 Added ``merge_slashes``. .. versionadded:: 0.7 Added ``alias`` and ``host``. .. versionchanged:: 0.6.1 ``HEAD`` is added to ``methods`` if ``GET`` is present. """ def __init__( self, string, defaults=None, subdomain=None, methods=None, build_only=False, endpoint=None, strict_slashes=None, merge_slashes=None, redirect_to=None, alias=False, host=None, websocket=False, ): if not string.startswith("/"): raise ValueError("urls must start with a leading slash") self.rule = string self.is_leaf = not string.endswith("/") self.map = None self.strict_slashes = strict_slashes self.merge_slashes = merge_slashes self.subdomain = subdomain self.host = host self.defaults = defaults self.build_only = build_only self.alias = alias self.websocket = websocket if methods is not None: if isinstance(methods, str): raise TypeError("'methods' should be a list of strings.") methods = {x.upper() for x in methods} if "HEAD" not in methods and "GET" in methods: methods.add("HEAD") if websocket and methods - {"GET", "HEAD", "OPTIONS"}: raise ValueError( "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods." ) self.methods = methods self.endpoint = endpoint self.redirect_to = redirect_to if defaults: self.arguments = set(map(str, defaults)) else: self.arguments = set() self._trace = self._converters = self._regex = self._argument_weights = None def empty(self): """ Return an unbound copy of this rule. This can be useful if want to reuse an already bound URL for another map. See ``get_empty_kwargs`` to override what keyword arguments are provided to the new copy. """ return type(self)(self.rule, **self.get_empty_kwargs()) def get_empty_kwargs(self): """ Provides kwargs for instantiating empty copy with empty() Use this method to provide custom keyword arguments to the subclass of ``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass has custom keyword arguments that are needed at instantiation. Must return a ``dict`` that will be provided as kwargs to the new instance of ``Rule``, following the initial ``self.rule`` value which is always provided as the first, required positional argument. 
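A sketch of a subclass that carries an extra keyword argument through
``empty()`` (the ``tag`` attribute is purely illustrative)::

    class TaggedRule(Rule):
        def __init__(self, string, tag=None, **kwargs):
            super(TaggedRule, self).__init__(string, **kwargs)
            self.tag = tag

        def get_empty_kwargs(self):
            # include the custom keyword so unbound copies keep it
            kwargs = super(TaggedRule, self).get_empty_kwargs()
            kwargs['tag'] = self.tag
            return kwargs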
""" defaults = None if self.defaults: defaults = dict(self.defaults) return dict( defaults=defaults, subdomain=self.subdomain, methods=self.methods, build_only=self.build_only, endpoint=self.endpoint, strict_slashes=self.strict_slashes, redirect_to=self.redirect_to, alias=self.alias, host=self.host, ) def get_rules(self, map): yield self def refresh(self): """Rebinds and refreshes the URL. Call this if you modified the rule in place. :internal: """ self.bind(self.map, rebind=True) def bind(self, map, rebind=False): """Bind the url to a map and create a regular expression based on the information from the rule itself and the defaults from the map. :internal: """ if self.map is not None and not rebind: raise RuntimeError("url rule %r already bound to map %r" % (self, self.map)) self.map = map if self.strict_slashes is None: self.strict_slashes = map.strict_slashes if self.merge_slashes is None: self.merge_slashes = map.merge_slashes if self.subdomain is None: self.subdomain = map.default_subdomain self.compile() def get_converter(self, variable_name, converter_name, args, kwargs): """Looks up the converter for the given parameter. .. versionadded:: 0.9 """ if converter_name not in self.map.converters: raise LookupError("the converter %r does not exist" % converter_name) return self.map.converters[converter_name](self.map, *args, **kwargs) def _encode_query_vars(self, query_vars): return url_encode( query_vars, charset=self.map.charset, sort=self.map.sort_parameters, key=self.map.sort_key, ) def compile(self): """Compiles the regular expression and stores it.""" assert self.map is not None, "rule not bound" if self.map.host_matching: domain_rule = self.host or "" else: domain_rule = self.subdomain or "" self._trace = [] self._converters = {} self._static_weights = [] self._argument_weights = [] regex_parts = [] def _build_regex(rule): index = 0 for converter, arguments, variable in parse_rule(rule): if converter is None: for match in re.finditer(r"/+|[^/]+", variable): part = match.group(0) if part.startswith("/"): if self.merge_slashes: regex_parts.append(r"/+?") self._trace.append((False, "/")) else: regex_parts.append(part) self._trace.append((False, part)) continue self._trace.append((False, part)) regex_parts.append(re.escape(part)) if part: self._static_weights.append((index, -len(part))) else: if arguments: c_args, c_kwargs = parse_converter_args(arguments) else: c_args = () c_kwargs = {} convobj = self.get_converter(variable, converter, c_args, c_kwargs) regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex)) self._converters[variable] = convobj self._trace.append((True, variable)) self._argument_weights.append(convobj.weight) self.arguments.add(str(variable)) index = index + 1 _build_regex(domain_rule) regex_parts.append("\\|") self._trace.append((False, "|")) _build_regex(self.rule if self.is_leaf else self.rule.rstrip("/")) if not self.is_leaf: self._trace.append((False, "/")) self._build = self._compile_builder(False).__get__(self, None) self._build_unknown = self._compile_builder(True).__get__(self, None) if self.build_only: return if not (self.is_leaf and self.strict_slashes): reps = u"*" if self.merge_slashes else u"?" tail = u"(?<!/)(?P<__suffix__>/%s)" % reps else: tail = u"" regex = u"^%s%s$" % (u"".join(regex_parts), tail) self._regex = re.compile(regex, re.UNICODE) def match(self, path, method=None): """Check if the rule matches a given path. Path is a string in the form ``"subdomain|/path"`` and is assembled by the map. 
If the map is doing host matching the subdomain part will be the host instead. If the rule matches a dict with the converted values is returned, otherwise the return value is `None`. :internal: """ if not self.build_only: require_redirect = False m = self._regex.search(path) if m is not None: groups = m.groupdict() # we have a folder like part of the url without a trailing # slash and strict slashes enabled. raise an exception that # tells the map to redirect to the same url but with a # trailing slash if ( self.strict_slashes and not self.is_leaf and not groups.pop("__suffix__") and ( method is None or self.methods is None or method in self.methods ) ): path += "/" require_redirect = True # if we are not in strict slashes mode we have to remove # a __suffix__ elif not self.strict_slashes: del groups["__suffix__"] result = {} for name, value in iteritems(groups): try: value = self._converters[name].to_python(value) except ValidationError: return result[str(name)] = value if self.defaults: result.update(self.defaults) if self.merge_slashes: new_path = "|".join(self.build(result, False)) if path.endswith("/") and not new_path.endswith("/"): new_path += "/" if new_path.count("/") < path.count("/"): path = new_path require_redirect = True if require_redirect: path = path.split("|", 1)[1] raise RequestPath(path) if self.alias and self.map.redirect_defaults: raise RequestAliasRedirect(result) return result @staticmethod def _get_func_code(code, name): globs, locs = {}, {} exec(code, globs, locs) return locs[name] def _compile_builder(self, append_unknown=True): defaults = self.defaults or {} dom_ops = [] url_ops = [] opl = dom_ops for is_dynamic, data in self._trace: if data == "|" and opl is dom_ops: opl = url_ops continue # this seems like a silly case to ever come up but: # if a default is given for a value that appears in the rule, # resolve it to a constant ahead of time if is_dynamic and data in defaults: data = self._converters[data].to_url(defaults[data]) opl.append((False, data)) elif not is_dynamic: opl.append( (False, url_quote(to_bytes(data, self.map.charset), safe="/:|+")) ) else: opl.append((True, data)) def _convert(elem): ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem)) ret.args = [ast.Name(str(elem), ast.Load())] # str for py2 return ret def _parts(ops): parts = [ _convert(elem) if is_dynamic else ast.Str(s=elem) for is_dynamic, elem in ops ] parts = parts or [ast.Str("")] # constant fold ret = [parts[0]] for p in parts[1:]: if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str): ret[-1] = ast.Str(ret[-1].s + p.s) else: ret.append(p) return ret dom_parts = _parts(dom_ops) url_parts = _parts(url_ops) if not append_unknown: body = [] else: body = [_IF_KWARGS_URL_ENCODE_AST] url_parts.extend(_URL_ENCODE_AST_NAMES) def _join(parts): if len(parts) == 1: # shortcut return parts[0] elif hasattr(ast, "JoinedStr"): # py36+ return ast.JoinedStr(parts) else: call = _prefix_names('"".join()') call.args = [ast.Tuple(parts, ast.Load())] return call body.append( ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load())) ) # str is necessary for python2 pargs = [ str(elem) for is_dynamic, elem in dom_ops + url_ops if is_dynamic and elem not in defaults ] kargs = [str(k) for k in defaults] func_ast = _prefix_names("def _(): pass") func_ast.name = "<builder:{!r}>".format(self.rule) if hasattr(ast, "arg"): # py3 func_ast.args.args.append(ast.arg(".self", None)) for arg in pargs + kargs: func_ast.args.args.append(ast.arg(arg, None)) func_ast.args.kwarg = 
ast.arg(".kwargs", None) else: func_ast.args.args.append(ast.Name(".self", ast.Param())) for arg in pargs + kargs: func_ast.args.args.append(ast.Name(arg, ast.Param())) func_ast.args.kwarg = ".kwargs" for _ in kargs: func_ast.args.defaults.append(ast.Str("")) func_ast.body = body # use `ast.parse` instead of `ast.Module` for better portability # python3.8 changes the signature of `ast.Module` module = ast.parse("") module.body = [func_ast] # mark everything as on line 1, offset 0 # less error-prone than `ast.fix_missing_locations` # bad line numbers cause an assert to fail in debug builds for node in ast.walk(module): if "lineno" in node._attributes: node.lineno = 1 if "col_offset" in node._attributes: node.col_offset = 0 code = compile(module, "<werkzeug routing>", "exec") return self._get_func_code(code, func_ast.name) def build(self, values, append_unknown=True): """Assembles the relative url for that rule and the subdomain. If building doesn't work for some reasons `None` is returned. :internal: """ try: if append_unknown: return self._build_unknown(**values) else: return self._build(**values) except ValidationError: return None def provides_defaults_for(self, rule): """Check if this rule has defaults for a given rule. :internal: """ return ( not self.build_only and self.defaults and self.endpoint == rule.endpoint and self != rule and self.arguments == rule.arguments ) def suitable_for(self, values, method=None): """Check if the dict of values has enough data for url generation. :internal: """ # if a method was given explicitly and that method is not supported # by this rule, this rule is not suitable. if ( method is not None and self.methods is not None and method not in self.methods ): return False defaults = self.defaults or () # all arguments required must be either in the defaults dict or # the value dictionary otherwise it's not suitable for key in self.arguments: if key not in defaults and key not in values: return False # in case defaults are given we ensure that either the value was # skipped or the value is the same as the default value. if defaults: for key, value in iteritems(defaults): if key in values and value != values[key]: return False return True def match_compare_key(self): """The match compare key for sorting. Current implementation: 1. rules without any arguments come first for performance reasons only as we expect them to match faster and some common ones usually don't have any arguments (index pages etc.) 2. rules with more static parts come first so the second argument is the negative length of the number of the static weights. 3. we order by static weights, which is a combination of index and length 4. The more complex rules come first so the next argument is the negative length of the number of argument weights. 5. lastly we order by the actual argument weights. :internal: """ return ( bool(self.arguments), -len(self._static_weights), self._static_weights, -len(self._argument_weights), self._argument_weights, ) def build_compare_key(self): """The build compare key for sorting. 
:internal: """ return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ()) def __eq__(self, other): return self.__class__ is other.__class__ and self._trace == other._trace __hash__ = None def __ne__(self, other): return not self.__eq__(other) def __str__(self): return self.rule @native_string_result def __repr__(self): if self.map is None: return u"<%s (unbound)>" % self.__class__.__name__ tmp = [] for is_dynamic, data in self._trace: if is_dynamic: tmp.append(u"<%s>" % data) else: tmp.append(data) return u"<%s %s%s -> %s>" % ( self.__class__.__name__, repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"), self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"", self.endpoint, ) class BaseConverter(object): """Base class for all converters.""" regex = "[^/]+" weight = 100 def __init__(self, map): self.map = map def to_python(self, value): return value def to_url(self, value): if isinstance(value, (bytes, bytearray)): return _fast_url_quote(value) return _fast_url_quote(text_type(value).encode(self.map.charset)) class UnicodeConverter(BaseConverter): """This converter is the default converter and accepts any string but only one path segment. Thus the string can not include a slash. This is the default validator. Example:: Rule('/pages/<page>'), Rule('/<string(length=2):lang_code>') :param map: the :class:`Map`. :param minlength: the minimum length of the string. Must be greater or equal 1. :param maxlength: the maximum length of the string. :param length: the exact length of the string. """ def __init__(self, map, minlength=1, maxlength=None, length=None): BaseConverter.__init__(self, map) if length is not None: length = "{%d}" % int(length) else: if maxlength is None: maxlength = "" else: maxlength = int(maxlength) length = "{%s,%s}" % (int(minlength), maxlength) self.regex = "[^/]" + length class AnyConverter(BaseConverter): """Matches one of the items provided. Items can either be Python identifiers or strings:: Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>') :param map: the :class:`Map`. :param items: this function accepts the possible items as positional arguments. """ def __init__(self, map, *items): BaseConverter.__init__(self, map) self.regex = "(?:%s)" % "|".join([re.escape(x) for x in items]) class PathConverter(BaseConverter): """Like the default :class:`UnicodeConverter`, but it also matches slashes. This is useful for wikis and similar applications:: Rule('/<path:wikipage>') Rule('/<path:wikipage>/edit') :param map: the :class:`Map`. """ regex = "[^/].*?" weight = 200 class NumberConverter(BaseConverter): """Baseclass for `IntegerConverter` and `FloatConverter`. :internal: """ weight = 50 def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False): if signed: self.regex = self.signed_regex BaseConverter.__init__(self, map) self.fixed_digits = fixed_digits self.min = min self.max = max self.signed = signed def to_python(self, value): if self.fixed_digits and len(value) != self.fixed_digits: raise ValidationError() value = self.num_convert(value) if (self.min is not None and value < self.min) or ( self.max is not None and value > self.max ): raise ValidationError() return value def to_url(self, value): value = self.num_convert(value) if self.fixed_digits: value = ("%%0%sd" % self.fixed_digits) % value return str(value) @property def signed_regex(self): return r"-?" 
+ self.regex class IntegerConverter(NumberConverter): """This converter only accepts integer values:: Rule("/page/<int:page>") By default it only accepts unsigned, positive values. The ``signed`` parameter will enable signed, negative values. :: Rule("/page/<int(signed=True):page>") :param map: The :class:`Map`. :param fixed_digits: The number of fixed digits in the URL. If you set this to ``4`` for example, the rule will only match if the URL looks like ``/0001/``. The default is variable length. :param min: The minimal value. :param max: The maximal value. :param signed: Allow signed (negative) values. .. versionadded:: 0.15 The ``signed`` parameter. """ regex = r"\d+" num_convert = int class FloatConverter(NumberConverter): """This converter only accepts floating point values:: Rule("/probability/<float:probability>") By default it only accepts unsigned, positive values. The ``signed`` parameter will enable signed, negative values. :: Rule("/offset/<float(signed=True):offset>") :param map: The :class:`Map`. :param min: The minimal value. :param max: The maximal value. :param signed: Allow signed (negative) values. .. versionadded:: 0.15 The ``signed`` parameter. """ regex = r"\d+\.\d+" num_convert = float def __init__(self, map, min=None, max=None, signed=False): NumberConverter.__init__(self, map, min=min, max=max, signed=signed) class UUIDConverter(BaseConverter): """This converter only accepts UUID strings:: Rule('/object/<uuid:identifier>') .. versionadded:: 0.10 :param map: the :class:`Map`. """ regex = ( r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-" r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}" ) def to_python(self, value): return uuid.UUID(value) def to_url(self, value): return str(value) #: the default converter mapping for the map. DEFAULT_CONVERTERS = { "default": UnicodeConverter, "string": UnicodeConverter, "any": AnyConverter, "path": PathConverter, "int": IntegerConverter, "float": FloatConverter, "uuid": UUIDConverter, } class Map(object): """The map class stores all the URL rules and some configuration parameters. Some of the configuration values are only stored on the `Map` instance since those affect all rules, others are just defaults and can be overridden for each rule. Note that you have to specify all arguments besides the `rules` as keyword arguments! :param rules: sequence of url rules for this map. :param default_subdomain: The default subdomain for rules without a subdomain defined. :param charset: charset of the url. defaults to ``"utf-8"`` :param strict_slashes: If a rule ends with a slash but the matched URL does not, redirect to the URL with a trailing slash. :param merge_slashes: Merge consecutive slashes when matching or building URLs. Matches will redirect to the normalized URL. Slashes in variable parts are not merged. :param redirect_defaults: This will redirect to the default rule if it wasn't visited that way. This helps creating unique URLs. :param converters: A dict of converters that adds additional converters to the list of converters. If you redefine one converter this will override the original one. :param sort_parameters: If set to `True` the url parameters are sorted. See `url_encode` for more details. :param sort_key: The sort key function for `url_encode`. :param encoding_errors: the error method to use for decoding :param host_matching: if set to `True` it enables the host matching feature and disables the subdomain one. If enabled the `host` parameter to rules is used instead of the `subdomain` one. .. 
versionchanged:: 1.0 If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match. .. versionchanged:: 1.0 Added ``merge_slashes``. .. versionchanged:: 0.7 Added ``encoding_errors`` and ``host_matching``. .. versionchanged:: 0.5 Added ``sort_parameters`` and ``sort_key``. """ #: A dict of default converters to be used. default_converters = ImmutableDict(DEFAULT_CONVERTERS) #: The type of lock to use when updating. #: #: .. versionadded:: 1.0 lock_class = Lock def __init__( self, rules=None, default_subdomain="", charset="utf-8", strict_slashes=True, merge_slashes=True, redirect_defaults=True, converters=None, sort_parameters=False, sort_key=None, encoding_errors="replace", host_matching=False, ): self._rules = [] self._rules_by_endpoint = {} self._remap = True self._remap_lock = self.lock_class() self.default_subdomain = default_subdomain self.charset = charset self.encoding_errors = encoding_errors self.strict_slashes = strict_slashes self.merge_slashes = merge_slashes self.redirect_defaults = redirect_defaults self.host_matching = host_matching self.converters = self.default_converters.copy() if converters: self.converters.update(converters) self.sort_parameters = sort_parameters self.sort_key = sort_key for rulefactory in rules or (): self.add(rulefactory) def is_endpoint_expecting(self, endpoint, *arguments): """Iterate over all rules and check if the endpoint expects the arguments provided. This is for example useful if you have some URLs that expect a language code and others that do not and you want to wrap the builder a bit so that the current language code is automatically added if not provided but endpoints expect it. :param endpoint: the endpoint to check. :param arguments: this function accepts one or more arguments as positional arguments. Each one of them is checked. """ self.update() arguments = set(arguments) for rule in self._rules_by_endpoint[endpoint]: if arguments.issubset(rule.arguments): return True return False def iter_rules(self, endpoint=None): """Iterate over all rules or the rules of an endpoint. :param endpoint: if provided only the rules for that endpoint are returned. :return: an iterator """ self.update() if endpoint is not None: return iter(self._rules_by_endpoint[endpoint]) return iter(self._rules) def add(self, rulefactory): """Add a new rule or factory to the map and bind it. Requires that the rule is not bound to another map. :param rulefactory: a :class:`Rule` or :class:`RuleFactory` """ for rule in rulefactory.get_rules(self): rule.bind(self) self._rules.append(rule) self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule) self._remap = True def bind( self, server_name, script_name=None, subdomain=None, url_scheme="http", default_method="GET", path_info=None, query_args=None, ): """Return a new :class:`MapAdapter` with the details specified to the call. Note that `script_name` will default to ``'/'`` if not further specified or `None`. The `server_name` at least is a requirement because the HTTP RFC requires absolute URLs for redirects and so all redirect exceptions raised by Werkzeug will contain the full canonical URL. If no path_info is passed to :meth:`match` it will use the default path info passed to bind. While this doesn't really make sense for manual bind calls, it's useful if you bind a map to a WSGI environment which already contains the path info. `subdomain` will default to the `default_subdomain` for this map if no defined. If there is no `default_subdomain` you cannot use the subdomain feature. .. 
versionchanged:: 1.0 If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match. .. versionchanged:: 0.15 ``path_info`` defaults to ``'/'`` if ``None``. .. versionchanged:: 0.8 ``query_args`` can be a string. .. versionchanged:: 0.7 Added ``query_args``. """ server_name = server_name.lower() if self.host_matching: if subdomain is not None: raise RuntimeError("host matching enabled and a subdomain was provided") elif subdomain is None: subdomain = self.default_subdomain if script_name is None: script_name = "/" if path_info is None: path_info = "/" try: server_name = _encode_idna(server_name) except UnicodeError: raise BadHost() return MapAdapter( self, server_name, script_name, subdomain, url_scheme, path_info, default_method, query_args, ) def bind_to_environ(self, environ, server_name=None, subdomain=None): """Like :meth:`bind` but you can pass it an WSGI environment and it will fetch the information from that dictionary. Note that because of limitations in the protocol there is no way to get the current subdomain and real `server_name` from the environment. If you don't provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or `HTTP_HOST` if provided) as used `server_name` with disabled subdomain feature. If `subdomain` is `None` but an environment and a server name is provided it will calculate the current subdomain automatically. Example: `server_name` is ``'example.com'`` and the `SERVER_NAME` in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated subdomain will be ``'staging.dev'``. If the object passed as environ has an environ attribute, the value of this attribute is used instead. This allows you to pass request objects. Additionally `PATH_INFO` added as a default of the :class:`MapAdapter` so that you don't have to pass the path info to the match method. .. versionchanged:: 1.0.0 If the passed server name specifies port 443, it will match if the incoming scheme is ``https`` without a port. .. versionchanged:: 1.0.0 A warning is shown when the passed server name does not match the incoming WSGI server name. .. versionchanged:: 0.8 This will no longer raise a ValueError when an unexpected server name was passed. .. versionchanged:: 0.5 previously this method accepted a bogus `calculate_subdomain` parameter that did not have any effect. It was removed because of that. :param environ: a WSGI environment. :param server_name: an optional server name hint (see above). :param subdomain: optionally the current subdomain (see above). """ environ = _get_environ(environ) wsgi_server_name = get_host(environ).lower() scheme = environ["wsgi.url_scheme"] if server_name is None: server_name = wsgi_server_name else: server_name = server_name.lower() # strip standard port to match get_host() if scheme == "http" and server_name.endswith(":80"): server_name = server_name[:-3] elif scheme == "https" and server_name.endswith(":443"): server_name = server_name[:-4] if subdomain is None and not self.host_matching: cur_server_name = wsgi_server_name.split(".") real_server_name = server_name.split(".") offset = -len(real_server_name) if cur_server_name[offset:] != real_server_name: # This can happen even with valid configs if the server was # accessed directly by IP address under some situations. # Instead of raising an exception like in Werkzeug 0.7 or # earlier we go by an invalid subdomain which will result # in a 404 error on matching. 
                warnings.warn(
                    "Current server name '{}' doesn't match configured"
                    " server name '{}'".format(wsgi_server_name, server_name),
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                subdomain = ".".join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name):
            val = environ.get(name)
            if val is not None:
                return wsgi_decoding_dance(val, self.charset)

        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            environ["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )

    def update(self):
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        if not self._remap:
            return

        with self._remap_lock:
            if not self._remap:
                return

            self._rules.sort(key=lambda x: x.match_compare_key())
            for rules in itervalues(self._rules_by_endpoint):
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False

    def __repr__(self):
        rules = self.iter_rules()
        return "%s(%s)" % (self.__class__.__name__, pformat(list(rules)))


class MapAdapter(object):
    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
    the URL matching and building based on runtime information.
    """

    def __init__(
        self,
        map,
        server_name,
        script_name,
        subdomain,
        url_scheme,
        path_info,
        default_method,
        query_args=None,
    ):
        self.map = map
        self.server_name = to_unicode(server_name)
        script_name = to_unicode(script_name)
        if not script_name.endswith(u"/"):
            script_name += u"/"
        self.script_name = script_name
        self.subdomain = to_unicode(subdomain)
        self.url_scheme = to_unicode(url_scheme)
        self.path_info = to_unicode(path_info)
        self.default_method = to_unicode(default_method)
        self.query_args = query_args
        self.websocket = self.url_scheme in {"ws", "wss"}

    def dispatch(
        self, view_func, path_info=None, method=None, catch_http_exceptions=False
    ):
        """Does the complete dispatching process.  `view_func` is called with
        the endpoint and a dict with the values for the view.  It should
        look up the view function, call it, and return a response object or
        WSGI application.  HTTP exceptions are not caught by default so that
        applications can display nicer error messages by just catching them
        by hand.  If you want to stick with the default error messages you
        can pass it ``catch_http_exceptions=True`` and it will catch the
        HTTP exceptions.

        Here is a small example for the dispatch usage::

            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule

            def on_index(request):
                return Response('Hello from the index')

            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}

            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)

        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.

        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second.  Has
                          to dispatch to the actual view function with this
                          information.  (see above)
        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\\s.
""" try: try: endpoint, args = self.match(path_info, method) except RequestRedirect as e: return e return view_func(endpoint, args) except HTTPException as e: if catch_http_exceptions: return e raise def match( self, path_info=None, method=None, return_rule=False, query_args=None, websocket=None, ): """The usage is simple: you just pass the match method the current path info as well as the method (which defaults to `GET`). The following things can then happen: - you receive a `NotFound` exception that indicates that no URL is matching. A `NotFound` exception is also a WSGI application you can call to get a default page not found page (happens to be the same object as `werkzeug.exceptions.NotFound`) - you receive a `MethodNotAllowed` exception that indicates that there is a match for this URL but not for the current request method. This is useful for RESTful applications. - you receive a `RequestRedirect` exception with a `new_url` attribute. This exception is used to notify you about a request Werkzeug requests from your WSGI application. This is for example the case if you request ``/foo`` although the correct URL is ``/foo/`` You can use the `RequestRedirect` instance as response-like object similar to all other subclasses of `HTTPException`. - you receive a ``WebsocketMismatch`` exception if the only match is a WebSocket rule but the bind is an HTTP request, or if the match is an HTTP rule but the bind is a WebSocket request. - you get a tuple in the form ``(endpoint, arguments)`` if there is a match (unless `return_rule` is True, in which case you get a tuple in the form ``(rule, arguments)``) If the path info is not passed to the match method the default path info of the map is used (defaults to the root URL if not defined explicitly). All of the exceptions raised are subclasses of `HTTPException` so they can be used as WSGI responses. They will all render generic error or redirect pages. Here is a small example for matching: >>> m = Map([ ... Rule('/', endpoint='index'), ... Rule('/downloads/', endpoint='downloads/index'), ... Rule('/downloads/<int:id>', endpoint='downloads/show') ... ]) >>> urls = m.bind("example.com", "/") >>> urls.match("/", "GET") ('index', {}) >>> urls.match("/downloads/42") ('downloads/show', {'id': 42}) And here is what happens on redirect and missing URLs: >>> urls.match("/downloads") Traceback (most recent call last): ... RequestRedirect: http://example.com/downloads/ >>> urls.match("/missing") Traceback (most recent call last): ... NotFound: 404 Not Found :param path_info: the path info to use for matching. Overrides the path info specified on binding. :param method: the HTTP method used for matching. Overrides the method specified on binding. :param return_rule: return the rule that matched instead of just the endpoint (defaults to `False`). :param query_args: optional query arguments that are used for automatic redirects as string or dictionary. It's currently not possible to use the query arguments for URL matching. :param websocket: Match WebSocket instead of HTTP requests. A websocket request has a ``ws`` or ``wss`` :attr:`url_scheme`. This overrides that detection. .. versionadded:: 1.0 Added ``websocket``. .. versionchanged:: 0.8 ``query_args`` can be a string. .. versionadded:: 0.7 Added ``query_args``. .. versionadded:: 0.6 Added ``return_rule``. 
""" self.map.update() if path_info is None: path_info = self.path_info else: path_info = to_unicode(path_info, self.map.charset) if query_args is None: query_args = self.query_args method = (method or self.default_method).upper() if websocket is None: websocket = self.websocket require_redirect = False path = u"%s|%s" % ( self.map.host_matching and self.server_name or self.subdomain, path_info and "/%s" % path_info.lstrip("/"), ) have_match_for = set() websocket_mismatch = False for rule in self.map._rules: try: rv = rule.match(path, method) except RequestPath as e: raise RequestRedirect( self.make_redirect_url( url_quote(e.path_info, self.map.charset, safe="/:|+"), query_args, ) ) except RequestAliasRedirect as e: raise RequestRedirect( self.make_alias_redirect_url( path, rule.endpoint, e.matched_values, method, query_args ) ) if rv is None: continue if rule.methods is not None and method not in rule.methods: have_match_for.update(rule.methods) continue if rule.websocket != websocket: websocket_mismatch = True continue if self.map.redirect_defaults: redirect_url = self.get_default_redirect(rule, method, rv, query_args) if redirect_url is not None: raise RequestRedirect(redirect_url) if rule.redirect_to is not None: if isinstance(rule.redirect_to, string_types): def _handle_match(match): value = rv[match.group(1)] return rule._converters[match.group(1)].to_url(value) redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to) else: redirect_url = rule.redirect_to(self, **rv) raise RequestRedirect( str( url_join( "%s://%s%s%s" % ( self.url_scheme or "http", self.subdomain + "." if self.subdomain else "", self.server_name, self.script_name, ), redirect_url, ) ) ) if require_redirect: raise RequestRedirect( self.make_redirect_url( url_quote(path_info, self.map.charset, safe="/:|+"), query_args ) ) if return_rule: return rule, rv else: return rule.endpoint, rv if have_match_for: raise MethodNotAllowed(valid_methods=list(have_match_for)) if websocket_mismatch: raise WebsocketMismatch() raise NotFound() def test(self, path_info=None, method=None): """Test if a rule would match. Works like `match` but returns `True` if the URL matches, or `False` if it does not exist. :param path_info: the path info to use for matching. Overrides the path info specified on binding. :param method: the HTTP method used for matching. Overrides the method specified on binding. """ try: self.match(path_info, method) except RequestRedirect: pass except HTTPException: return False return True def allowed_methods(self, path_info=None): """Returns the valid methods that match for a given path. .. versionadded:: 0.7 """ try: self.match(path_info, method="--") except MethodNotAllowed as e: return e.valid_methods except HTTPException: pass return [] def get_host(self, domain_part): """Figures out the full host name for the given domain part. The domain part is a subdomain in case host matching is disabled or a full host name. """ if self.map.host_matching: if domain_part is None: return self.server_name return to_unicode(domain_part, "ascii") subdomain = domain_part if subdomain is None: subdomain = self.subdomain else: subdomain = to_unicode(subdomain, "ascii") return (subdomain + u"." if subdomain else u"") + self.server_name def get_default_redirect(self, rule, method, values, query_args): """A helper that returns the URL to redirect to if it finds one. This is used for default redirecting only. 
        :internal:
        """
        assert self.map.redirect_defaults
        for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourselves,
            # has a lower priority for the defaults.  We order the ones
            # with the highest priority up for building.
            if r is rule:
                break
            if r.provides_defaults_for(rule) and r.suitable_for(values, method):
                values.update(r.defaults)
                domain_part, path = r.build(values)
                return self.make_redirect_url(path, query_args, domain_part=domain_part)

    def encode_query_args(self, query_args):
        if not isinstance(query_args, string_types):
            query_args = url_encode(query_args, self.map.charset)
        return query_args

    def make_redirect_url(self, path_info, query_args=None, domain_part=None):
        """Creates a redirect URL.

        :internal:
        """
        suffix = ""
        if query_args:
            suffix = "?" + self.encode_query_args(query_args)
        return str(
            "%s://%s/%s%s"
            % (
                self.url_scheme or "http",
                self.get_host(domain_part),
                posixpath.join(
                    self.script_name[:-1].lstrip("/"), path_info.lstrip("/")
                ),
                suffix,
            )
        )

    def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
        """Internally called to make an alias redirect URL."""
        url = self.build(
            endpoint, values, method, append_unknown=False, force_external=True
        )
        if query_args:
            url += "?" + self.encode_query_args(query_args)
        assert url != path, "detected invalid alias setting. No canonical URL found"
        return url

    def _partial_build(self, endpoint, values, method, append_unknown):
        """Helper for :meth:`build`.  Returns subdomain and path for the
        rule that accepts this endpoint, values and method.

        :internal:
        """
        # in case the method is none, try with the default method first
        if method is None:
            rv = self._partial_build(
                endpoint, values, self.default_method, append_unknown
            )
            if rv is not None:
                return rv

        # Default method did not match or a specific method is passed.
        # Check all for first match with matching host. If no matching
        # host is found, go with first result.
        first_match = None

        for rule in self.map._rules_by_endpoint.get(endpoint, ()):
            if rule.suitable_for(values, method):
                rv = rule.build(values, append_unknown)

                if rv is not None:
                    rv = (rv[0], rv[1], rule.websocket)
                    if self.map.host_matching:
                        if rv[0] == self.server_name:
                            return rv
                        elif first_match is None:
                            first_match = rv
                    else:
                        return rv

        return first_match

    def build(
        self,
        endpoint,
        values=None,
        method=None,
        force_external=False,
        append_unknown=True,
    ):
        """Building URLs works pretty much the other way round.  Instead of
        `match` you call `build` and pass it the endpoint and a dict of
        arguments for the placeholders.

        The `build` function also accepts an argument called `force_external`
        which, if you set it to `True` will force external URLs.  By default,
        external URLs (those that include the server name) will only be used
        if the target URL is on a different subdomain.

        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.build("index", {})
        '/'
        >>> urls.build("downloads/show", {'id': 42})
        '/downloads/42'
        >>> urls.build("downloads/show", {'id': 42}, force_external=True)
        'http://example.com/downloads/42'

        Because URLs cannot contain non-ASCII data you will always get
        bytestrings back.  Non-ASCII characters are urlencoded with the
        charset defined on the map instance.
        Additional values are converted to unicode and appended to the URL as
        URL querystring parameters:

        >>> urls.build("index", {'q': 'My Searchstring'})
        '/?q=My+Searchstring'

        When processing those additional values, lists are furthermore
        interpreted as multiple values (as per
        :py:class:`werkzeug.datastructures.MultiDict`):

        >>> urls.build("index", {'q': ['a', 'b', 'c']})
        '/?q=a&q=b&q=c'

        Passing a ``MultiDict`` will also add multiple values:

        >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
        '/?p=z&q=a&q=b'

        If a rule does not exist when building, a `BuildError` exception is
        raised.

        The build method accepts an argument called `method` which allows you
        to specify the method you want to have a URL built for if you have
        different methods for the same endpoint specified.

        .. versionadded:: 0.6
           the `append_unknown` parameter was added.

        :param endpoint: the endpoint of the URL to build.
        :param values: the values for the URL to build.  Unhandled values are
                       appended to the URL as query parameters.
        :param method: the HTTP method for the rule if there are different
                       URLs for different methods on the same endpoint.
        :param force_external: enforce full canonical external URLs. If the URL
                               scheme is not provided, this will generate
                               a protocol-relative URL.
        :param append_unknown: unknown parameters are appended to the generated
                               URL as query string argument.  Disable this
                               if you want the builder to ignore those.
        """
        self.map.update()

        if values:
            if isinstance(values, MultiDict):
                temp_values = {}
                # iteritems(dict, values) is like `values.lists()`
                # without the call or `list()` coercion overhead.
                for key, value in iteritems(dict, values):
                    if not value:
                        continue
                    if len(value) == 1:  # flatten single item lists
                        value = value[0]
                        if value is None:  # drop None
                            continue
                    temp_values[key] = value
                values = temp_values
            else:
                # drop None
                values = dict(i for i in iteritems(values) if i[1] is not None)
        else:
            values = {}

        rv = self._partial_build(endpoint, values, method, append_unknown)
        if rv is None:
            raise BuildError(endpoint, values, method, self)
        domain_part, path, websocket = rv

        host = self.get_host(domain_part)

        # Always build WebSocket routes with the scheme (browsers
        # require full URLs). If bound to a WebSocket, ensure that HTTP
        # routes are built with an HTTP scheme.
        url_scheme = self.url_scheme
        secure = url_scheme in {"https", "wss"}

        if websocket:
            force_external = True
            url_scheme = "wss" if secure else "ws"
        elif url_scheme:
            url_scheme = "https" if secure else "http"

        # shortcut this.
        if not force_external and (
            (self.map.host_matching and host == self.server_name)
            or (not self.map.host_matching and domain_part == self.subdomain)
        ):
            return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))

        return str(
            "%s//%s%s/%s"
            % (
                url_scheme + ":" if url_scheme else "",
                host,
                self.script_name[:-1],
                path.lstrip("/"),
            )
        )
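

# A minimal usage sketch of the matching and building API defined above.  It
# only exercises ``Map``, ``Rule``, ``bind``, ``match`` and ``build`` as their
# docstrings describe; the host name and endpoint names are illustrative
# assumptions, not anything mandated by the library.  The ``__main__`` guard
# keeps the sketch from running when the module is merely imported.
if __name__ == "__main__":
    url_map = Map(
        [
            Rule("/", endpoint="index"),
            Rule("/user/<int:user_id>", endpoint="user"),
        ]
    )
    adapter = url_map.bind("example.com", "/")

    # match() resolves a path to ``(endpoint, arguments)`` ...
    assert adapter.match("/user/42") == ("user", {"user_id": 42})

    # ... and build() is the inverse: endpoint plus values back to a path.
    assert adapter.build("user", {"user_id": 42}) == "/user/42"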
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/http.py
# -*- coding: utf-8 -*- """ werkzeug.http ~~~~~~~~~~~~~ Werkzeug comes with a bunch of utilities that help Werkzeug to deal with HTTP data. Most of the classes and functions provided by this module are used by the wrappers, but they are useful on their own, too, especially if the response and request objects are not used. This covers some of the more HTTP centric features of WSGI, some other utilities such as cookie handling are documented in the `werkzeug.utils` module. :copyright: 2007 Pallets :license: BSD-3-Clause """ import base64 import re import warnings from datetime import datetime from datetime import timedelta from hashlib import md5 from time import gmtime from time import time from ._compat import integer_types from ._compat import iteritems from ._compat import PY2 from ._compat import string_types from ._compat import text_type from ._compat import to_bytes from ._compat import to_unicode from ._compat import try_coerce_native from ._internal import _cookie_parse_impl from ._internal import _cookie_quote from ._internal import _make_cookie_domain try: from email.utils import parsedate_tz except ImportError: from email.Utils import parsedate_tz try: from urllib.request import parse_http_list as _parse_list_header from urllib.parse import unquote_to_bytes as _unquote except ImportError: from urllib2 import parse_http_list as _parse_list_header from urllib2 import unquote as _unquote _cookie_charset = "latin1" _basic_auth_charset = "utf-8" # for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231 _accept_re = re.compile( r""" ( # media-range capturing-parenthesis [^\s;,]+ # type/subtype (?:[ \t]*;[ \t]* # ";" (?: # parameter non-capturing-parenthesis [^\s;,q][^\s;,]* # token that doesn't start with "q" | # or q[^\s;,=][^\s;,]* # token that is more than just "q" ) )* # zero or more parameters ) # end of media-range (?:[ \t]*;[ \t]*q= # weight is a "q" parameter (\d*(?:\.\d+)?) # qvalue capturing-parentheses [^,]* # "extension" accept params: who cares? )? # accept params are optional """, re.VERBOSE, ) _token_chars = frozenset( "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~" ) _etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)') _unsafe_header_chars = set('()<>@,;:"/[]?={} \t') _option_header_piece_re = re.compile( r""" ;\s*,?\s* # newlines were replaced with commas (?P<key> "[^"\\]*(?:\\.[^"\\]*)*" # quoted string | [^\s;,=*]+ # token ) (?:\*(?P<count>\d+))? # *1, optional continuation index \s* (?: # optionally followed by =value (?: # equals sign, possibly with encoding \*\s*=\s* # * indicates extended notation (?: # optional encoding (?P<encoding>[^\s]+?) '(?P<language>[^\s]*?)' )? | =\s* # basic notation ) (?P<value> "[^"\\]*(?:\\.[^"\\]*)*" # quoted string | [^;,]+ # token )? )? 
    \s*
    """,
    flags=re.VERBOSE,
)
_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?")

_entity_headers = frozenset(
    [
        "allow",
        "content-encoding",
        "content-language",
        "content-length",
        "content-location",
        "content-md5",
        "content-range",
        "content-type",
        "expires",
        "last-modified",
    ]
)
_hop_by_hop_headers = frozenset(
    [
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailer",
        "transfer-encoding",
        "upgrade",
    ]
)

HTTP_STATUS_CODES = {
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    103: "Early Hints",  # see RFC 8297
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi Status",
    208: "Already Reported",  # see RFC 5842
    226: "IM Used",  # see RFC 3229
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    306: "Switch Proxy",  # unused
    307: "Temporary Redirect",
    308: "Permanent Redirect",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",  # unused
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Request Entity Too Large",
    414: "Request URI Too Long",
    415: "Unsupported Media Type",
    416: "Requested Range Not Satisfiable",
    417: "Expectation Failed",
    418: "I'm a teapot",  # see RFC 2324
    421: "Misdirected Request",  # see RFC 7540
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    425: "Too Early",  # see RFC 8470
    426: "Upgrade Required",
    428: "Precondition Required",  # see RFC 6585
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    449: "Retry With",  # proprietary MS extension
    451: "Unavailable For Legal Reasons",
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    506: "Variant Also Negotiates",  # see RFC 2295
    507: "Insufficient Storage",
    508: "Loop Detected",  # see RFC 5842
    510: "Not Extended",
    511: "Network Authentication Required",  # see RFC 6585
}


def wsgi_to_bytes(data):
    """coerce wsgi unicode represented bytes to real ones"""
    if isinstance(data, bytes):
        return data
    return data.encode("latin1")  # XXX: utf8 fallback?


def bytes_to_wsgi(data):
    assert isinstance(data, bytes), "data must be bytes"
    if isinstance(data, str):
        return data
    else:
        return data.decode("latin1")


def quote_header_value(value, extra_chars="", allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    if allow_token:
        token_chars = _token_chars | set(extra_chars)
        if set(value).issubset(token_chars):
            return value
    return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"')


def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
""" if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != "\\\\": return value.replace("\\\\", "\\").replace('\\"', '"') return value def dump_options_header(header, options): """The reverse function to :func:`parse_options_header`. :param header: the header to dump :param options: a dict of options to append. """ segments = [] if header is not None: segments.append(header) for key, value in iteritems(options): if value is None: segments.append(key) else: segments.append("%s=%s" % (key, quote_header_value(value))) return "; ".join(segments) def dump_header(iterable, allow_token=True): """Dump an HTTP header again. This is the reversal of :func:`parse_list_header`, :func:`parse_set_header` and :func:`parse_dict_header`. This also quotes strings that include an equals sign unless you pass it as dict of key, value pairs. >>> dump_header({'foo': 'bar baz'}) 'foo="bar baz"' >>> dump_header(('foo', 'bar baz')) 'foo, "bar baz"' :param iterable: the iterable or dict of values to quote. :param allow_token: if set to `False` tokens as values are disallowed. See :func:`quote_header_value` for more details. """ if isinstance(iterable, dict): items = [] for key, value in iteritems(iterable): if value is None: items.append(key) else: items.append( "%s=%s" % (key, quote_header_value(value, allow_token=allow_token)) ) else: items = [quote_header_value(x, allow_token=allow_token) for x in iterable] return ", ".join(items) def dump_csp_header(header): """Dump a Content Security Policy header. These are structured into policies such as "default-src 'self'; script-src 'self'". .. versionadded:: 1.0.0 Support for Content Security Policy headers was added. """ return "; ".join("%s %s" % (key, value) for key, value in iteritems(header)) def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. 
    :return: :class:`list`
    """
    result = []
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result


def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` argument):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        # XXX: validate
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        if "=" not in item:
            result[item] = None
            continue
        name, value = item.split("=", 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result


def parse_options_header(value, multiple=False):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionchanged:: 0.15
        :rfc:`2231` parameter continuations are handled.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :param multiple: Whether to try to parse and return multiple MIME types
    :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
             if multiple=True
    """
    if not value:
        return "", {}

    result = []

    value = "," + value.replace("\n", ",")
    while value:
        match = _option_header_start_mime_type.match(value)
        if not match:
            break
        result.append(match.group(1))  # mimetype
        options = {}
        # Parse options
        rest = match.group(2)
        continued_encoding = None
        while rest:
            optmatch = _option_header_piece_re.match(rest)
            if not optmatch:
                break
            option, count, encoding, language, option_value = optmatch.groups()
            # Continuations don't have to supply the encoding after the
            # first line. If we're in a continuation, track the current
            # encoding to use for subsequent lines. Reset it when the
            # continuation ends.
            if not count:
                continued_encoding = None
            else:
                if not encoding:
                    encoding = continued_encoding
                continued_encoding = encoding
            option = unquote_header_value(option)
            if option_value is not None:
                option_value = unquote_header_value(option_value, option == "filename")
                if encoding is not None:
                    option_value = _unquote(option_value).decode(encoding)
            if count:
                # Continuations append to the existing value. For
                # simplicity, this ignores the possibility of
                # out-of-order indices, which shouldn't happen anyway.
                options[option] = options.get(option, "") + option_value
            else:
                options[option] = option_value
            rest = rest[optmatch.end() :]
        result.append(options)
        if multiple is False:
            return tuple(result)
        value = rest

    return tuple(result) if result else ("", {})


def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.
    This does not implement a complete valid algorithm but one that supports
    at least value and quality extraction.

    Returns a new :class:`Accept` object (basically a list of
    ``(value, quality)`` tuples sorted by the quality with some additional
    accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept

    if not value:
        return cls(None)

    result = []
    for match in _accept_re.finditer(value):
        quality = match.group(2)
        if not quality:
            quality = 1
        else:
            quality = max(min(float(quality), 1), 0)
        result.append((match.group(1), quality))
    return cls(result)


def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differentiates between response
    and request cache control; this method does not.  It's your responsibility
    not to use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    if not value:
        return cls(None, on_update)
    return cls(parse_dict_header(value), on_update)


def parse_csp_header(value, on_update=None, cls=None):
    """Parse a Content Security Policy header.

    .. versionadded:: 1.0.0
        Support for Content Security Policy headers was added.

    :param value: a csp header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.ContentSecurityPolicy` is
                used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = ContentSecurityPolicy
    if value is None:
        return cls(None, on_update)
    items = []
    for policy in value.split(";"):
        policy = policy.strip()
        # Ignore badly formatted policies (no space)
        if " " in policy:
            directive, value = policy.strip().split(" ", 1)
            items.append((directive.strip(), value.strip()))
    return cls(items, on_update)


def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if not value:
        return HeaderSet(None, on_update)
    return HeaderSet(parse_list_header(value), on_update)


def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.
The return value is either `None` if the header was invalid or not given, otherwise an :class:`~werkzeug.datastructures.Authorization` object. :param value: the authorization header to parse. :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. """ if not value: return value = wsgi_to_bytes(value) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except ValueError: return if auth_type == b"basic": try: username, password = base64.b64decode(auth_info).split(b":", 1) except Exception: return return Authorization( "basic", { "username": to_unicode(username, _basic_auth_charset), "password": to_unicode(password, _basic_auth_charset), }, ) elif auth_type == b"digest": auth_map = parse_dict_header(auth_info) for key in "username", "realm", "nonce", "uri", "response": if key not in auth_map: return if "qop" in auth_map: if not auth_map.get("nc") or not auth_map.get("cnonce"): return return Authorization("digest", auth_map) def parse_www_authenticate_header(value, on_update=None): """Parse an HTTP WWW-Authenticate header into a :class:`~werkzeug.datastructures.WWWAuthenticate` object. :param value: a WWW-Authenticate header to parse. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.WWWAuthenticate` object is changed. :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. """ if not value: return WWWAuthenticate(on_update=on_update) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except (ValueError, AttributeError): return WWWAuthenticate(value.strip().lower(), on_update=on_update) return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) def parse_if_range_header(value): """Parses an if-range header which can be an etag or a date. Returns a :class:`~werkzeug.datastructures.IfRange` object. .. versionadded:: 0.7 """ if not value: return IfRange() date = parse_date(value) if date is not None: return IfRange(date=date) # drop weakness information return IfRange(unquote_etag(value)[0]) def parse_range_header(value, make_inclusive=True): """Parses a range header into a :class:`~werkzeug.datastructures.Range` object. If the header is missing or malformed `None` is returned. `ranges` is a list of ``(start, stop)`` tuples where the ranges are non-inclusive. .. versionadded:: 0.7 """ if not value or "=" not in value: return None ranges = [] last_end = 0 units, rng = value.split("=", 1) units = units.strip().lower() for item in rng.split(","): item = item.strip() if "-" not in item: return None if item.startswith("-"): if last_end < 0: return None try: begin = int(item) except ValueError: return None end = None last_end = -1 elif "-" in item: begin, end = item.split("-", 1) begin = begin.strip() end = end.strip() if not begin.isdigit(): return None begin = int(begin) if begin < last_end or last_end < 0: return None if end: if not end.isdigit(): return None end = int(end) + 1 if begin >= end: return None else: end = None last_end = end ranges.append((begin, end)) return Range(units, ranges) def parse_content_range_header(value, on_update=None): """Parses a range header into a :class:`~werkzeug.datastructures.ContentRange` object or `None` if parsing is not possible. .. versionadded:: 0.7 :param value: a content range header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.ContentRange` object is changed. 
""" if value is None: return None try: units, rangedef = (value or "").strip().split(None, 1) except ValueError: return None if "/" not in rangedef: return None rng, length = rangedef.split("/", 1) if length == "*": length = None elif length.isdigit(): length = int(length) else: return None if rng == "*": return ContentRange(units, None, None, length, on_update=on_update) elif "-" not in rng: return None start, stop = rng.split("-", 1) try: start = int(start) stop = int(stop) + 1 except ValueError: return None if is_byte_range_valid(start, stop, length): return ContentRange(units, start, stop, length, on_update=on_update) def quote_etag(etag, weak=False): """Quote an etag. :param etag: the etag to quote. :param weak: set to `True` to tag it "weak". """ if '"' in etag: raise ValueError("invalid etag") etag = '"%s"' % etag if weak: etag = "W/" + etag return etag def unquote_etag(etag): """Unquote a single etag: >>> unquote_etag('W/"bar"') ('bar', True) >>> unquote_etag('"bar"') ('bar', False) :param etag: the etag identifier to unquote. :return: a ``(etag, weak)`` tuple. """ if not etag: return None, None etag = etag.strip() weak = False if etag.startswith(("W/", "w/")): weak = True etag = etag[2:] if etag[:1] == etag[-1:] == '"': etag = etag[1:-1] return etag, weak def parse_etags(value): """Parse an etag header. :param value: the tag header to parse :return: an :class:`~werkzeug.datastructures.ETags` object. """ if not value: return ETags() strong = [] weak = [] end = len(value) pos = 0 while pos < end: match = _etag_re.match(value, pos) if match is None: break is_weak, quoted, raw = match.groups() if raw == "*": return ETags(star_tag=True) elif quoted: raw = quoted if is_weak: weak.append(raw) else: strong.append(raw) pos = match.end() return ETags(strong, weak) def generate_etag(data): """Generate an etag for some data.""" return md5(data).hexdigest() def parse_date(value): """Parse one of the following date formats into a datetime object: .. sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object. """ if value: t = parsedate_tz(value.strip()) if t is not None: try: year = t[0] # unfortunately that function does not tell us if two digit # years were part of the string, or if they were prefixed # with two zeroes. So what we do is to assume that 69-99 # refer to 1900, and everything below to 2000 if year >= 0 and year <= 68: year += 2000 elif year >= 69 and year <= 99: year += 1900 return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0) except (ValueError, OverflowError): return None def _dump_date(d, delim): """Used for `http_date` and `cookie_date`.""" if d is None: d = gmtime() elif isinstance(d, datetime): d = d.utctimetuple() elif isinstance(d, (integer_types, float)): d = gmtime(d) return "%s, %02d%s%s%s%04d %02d:%02d:%02d GMT" % ( ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday], d.tm_mday, delim, ( "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", )[d.tm_mon - 1], delim, d.tm_year, d.tm_hour, d.tm_min, d.tm_sec, ) def cookie_date(expires=None): """Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. 
    The :func:`parse_date` function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.

    :param expires: If provided that date is used, otherwise the current.
    """
    return _dump_date(expires, "-")


def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.

    Accepts a floating point number expressed in seconds since the epoch,
    a datetime object or a timetuple.  All times in UTC.

    The :func:`parse_date` function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.

    :param timestamp: If provided that date is used, otherwise the current.
    """
    return _dump_date(timestamp, " ")


def parse_age(value=None):
    """Parses a base-10 integer count of seconds into a timedelta.

    If parsing fails, the return value is `None`.

    :param value: a string consisting of an integer represented in base-10
    :return: a :class:`datetime.timedelta` object or `None`.
    """
    if not value:
        return None
    try:
        seconds = int(value)
    except ValueError:
        return None

    if seconds < 0:
        return None

    try:
        return timedelta(seconds=seconds)
    except OverflowError:
        return None


def dump_age(age=None):
    """Formats the duration as a base-10 integer.

    :param age: should be an integer number of seconds,
                a :class:`datetime.timedelta` object, or,
                if the age is unknown, `None` (default).
    """
    if age is None:
        return
    if isinstance(age, timedelta):
        # do the equivalent of Python 2.7's timedelta.total_seconds(),
        # but disregarding fractional seconds
        age = age.seconds + (age.days * 24 * 3600)

    age = int(age)
    if age < 0:
        raise ValueError("age cannot be negative")

    return str(age)


def is_resource_modified(
    environ, etag=None, data=None, last_modified=None, ignore_if_range=True
):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, the `If-Range` header will be taken
                            into account.
    :return: `True` if the resource was modified, otherwise `False`.

    .. versionchanged:: 1.0.0
        The check is run for methods other than ``GET`` and ``HEAD``.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives.  See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    if_range = None
    if not ignore_if_range and "HTTP_RANGE" in environ:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE")) if if_range is not None and if_range.date is not None: modified_since = if_range.date else: modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE")) if modified_since and last_modified and last_modified <= modified_since: unmodified = True if etag: etag, _ = unquote_etag(etag) if if_range is not None and if_range.etag is not None: unmodified = parse_etags(if_range.etag).contains(etag) else: if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH")) if if_none_match: # https://tools.ietf.org/html/rfc7232#section-3.2 # "A recipient MUST use the weak comparison function when comparing # entity-tags for If-None-Match" unmodified = if_none_match.contains_weak(etag) # https://tools.ietf.org/html/rfc7232#section-3.1 # "Origin server MUST use the strong comparison function when # comparing entity-tags for If-Match" if_match = parse_etags(environ.get("HTTP_IF_MATCH")) if if_match: unmodified = not if_match.is_strong(etag) return not unmodified def remove_entity_headers(headers, allowed=("expires", "content-location")): """Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers. """ allowed = set(x.lower() for x in allowed) headers[:] = [ (key, value) for key, value in headers if not is_entity_header(key) or key.lower() in allowed ] def remove_hop_by_hop_headers(headers): """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. versionadded:: 0.5 :param headers: a list or :class:`Headers` object. """ headers[:] = [ (key, value) for key, value in headers if not is_hop_by_hop_header(key) ] def is_entity_header(header): """Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _entity_headers def is_hop_by_hop_header(header): """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise. """ return header.lower() in _hop_by_hop_headers def parse_cookie(header, charset="utf-8", errors="replace", cls=None): """Parse a cookie from a string or WSGI environ. The same key can be provided multiple times, the values are stored in-order. The default :class:`MultiDict` will have the first value first, and all values can be retrieved with :meth:`MultiDict.getlist`. :param header: The cookie header as a string, or a WSGI environ dict with a ``HTTP_COOKIE`` key. :param charset: The charset for the cookie values. :param errors: The error behavior for the charset decoding. :param cls: A dict-like class to store the parsed cookies in. Defaults to :class:`MultiDict`. .. versionchanged:: 1.0.0 Returns a :class:`MultiDict` instead of a ``TypeConversionDict``. .. versionchanged:: 0.5 Returns a :class:`TypeConversionDict` instead of a regular dict. The ``cls`` parameter was added. 
""" if isinstance(header, dict): header = header.get("HTTP_COOKIE", "") elif header is None: header = "" # On Python 3, PEP 3333 sends headers through the environ as latin1 # decoded strings. Encode strings back to bytes for parsing. if isinstance(header, text_type): header = header.encode("latin1", "replace") if cls is None: cls = MultiDict def _parse_pairs(): for key, val in _cookie_parse_impl(header): key = to_unicode(key, charset, errors, allow_none_charset=True) if not key: continue val = to_unicode(val, charset, errors, allow_none_charset=True) yield try_coerce_native(key), val return cls(_parse_pairs()) def dump_cookie( key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, charset="utf-8", sync_expires=True, max_size=4093, samesite=None, ): """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. :param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. :param max_size: Warn if the final header value exceeds this size. The default, 4093, should be safely `supported by most browsers <cookie_>`_. Set to 0 to disable this check. :param samesite: Limits the scope of the cookie such that it will only be attached to requests if those requests are same-site. .. _`cookie`: http://browsercookielimits.squawky.net/ .. versionchanged:: 1.0.0 The string ``'None'`` is accepted for ``samesite``. 
""" key = to_bytes(key, charset) value = to_bytes(value, charset) if path is not None: from .urls import iri_to_uri path = iri_to_uri(path, charset) domain = _make_cookie_domain(domain) if isinstance(max_age, timedelta): max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds if expires is not None: if not isinstance(expires, string_types): expires = cookie_date(expires) elif max_age is not None and sync_expires: expires = to_bytes(cookie_date(time() + max_age)) if samesite is not None: samesite = samesite.title() if samesite not in {"Strict", "Lax", "None"}: raise ValueError("SameSite must be 'Strict', 'Lax', or 'None'.") buf = [key + b"=" + _cookie_quote(value)] # XXX: In theory all of these parameters that are not marked with `None` # should be quoted. Because stdlib did not quote it before I did not # want to introduce quoting there now. for k, v, q in ( (b"Domain", domain, True), (b"Expires", expires, False), (b"Max-Age", max_age, False), (b"Secure", secure, None), (b"HttpOnly", httponly, None), (b"Path", path, False), (b"SameSite", samesite, False), ): if q is None: if v: buf.append(k) continue if v is None: continue tmp = bytearray(k) if not isinstance(v, (bytes, bytearray)): v = to_bytes(text_type(v), charset) if q: v = _cookie_quote(v) tmp += b"=" + v buf.append(bytes(tmp)) # The return value will be an incorrectly encoded latin1 header on # Python 3 for consistency with the headers object and a bytestring # on Python 2 because that's how the API makes more sense. rv = b"; ".join(buf) if not PY2: rv = rv.decode("latin1") # Warn if the final value of the cookie is larger than the limit. If the # cookie is too large, then it may be silently ignored by the browser, # which can be quite hard to debug. cookie_size = len(rv) if max_size and cookie_size > max_size: value_size = len(value) warnings.warn( 'The "{key}" cookie is too large: the value was {value_size} bytes' " but the header required {extra_size} extra bytes. The final size" " was {cookie_size} bytes but the limit is {max_size} bytes." " Browsers may silently ignore cookies larger than this.".format( key=key, value_size=value_size, extra_size=cookie_size - value_size, cookie_size=cookie_size, max_size=max_size, ), stacklevel=2, ) return rv def is_byte_range_valid(start, stop, length): """Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7 """ if (start is None) != (stop is None): return False elif start is None: return length is None or length >= 0 elif length is None: return 0 <= start < stop elif start >= stop: return False return 0 <= start < length # circular dependencies from .datastructures import Accept from .datastructures import Authorization from .datastructures import ContentRange from .datastructures import ContentSecurityPolicy from .datastructures import ETags from .datastructures import HeaderSet from .datastructures import IfRange from .datastructures import MultiDict from .datastructures import Range from .datastructures import RequestCacheControl from .datastructures import WWWAuthenticate
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/useragents.py
# -*- coding: utf-8 -*- """ werkzeug.useragents ~~~~~~~~~~~~~~~~~~~ This module provides a helper to inspect user agent strings. This module is far from complete but should work for most of the currently available browsers. :copyright: 2007 Pallets :license: BSD-3-Clause """ import re class UserAgentParser(object): """A simple user agent parser. Used by the `UserAgent`.""" platforms = ( (" cros ", "chromeos"), ("iphone|ios", "iphone"), ("ipad", "ipad"), (r"darwin|mac|os\s*x", "macos"), ("win", "windows"), (r"android", "android"), ("netbsd", "netbsd"), ("openbsd", "openbsd"), ("freebsd", "freebsd"), ("dragonfly", "dragonflybsd"), ("(sun|i86)os", "solaris"), (r"x11|lin(\b|ux)?", "linux"), (r"nintendo\s+wii", "wii"), ("irix", "irix"), ("hp-?ux", "hpux"), ("aix", "aix"), ("sco|unix_sv", "sco"), ("bsd", "bsd"), ("amiga", "amiga"), ("blackberry|playbook", "blackberry"), ("symbian", "symbian"), ) browsers = ( ("googlebot", "google"), ("msnbot", "msn"), ("yahoo", "yahoo"), ("ask jeeves", "ask"), (r"aol|america\s+online\s+browser", "aol"), (r"opera|opr", "opera"), ("edge", "edge"), ("chrome|crios", "chrome"), ("seamonkey", "seamonkey"), ("firefox|firebird|phoenix|iceweasel", "firefox"), ("galeon", "galeon"), ("safari|version", "safari"), ("webkit", "webkit"), ("camino", "camino"), ("konqueror", "konqueror"), ("k-meleon", "kmeleon"), ("netscape", "netscape"), (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"), ("lynx", "lynx"), ("links", "links"), ("Baiduspider", "baidu"), ("bingbot", "bing"), ("mozilla", "mozilla"), ) _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?" _language_re = re.compile( r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|" r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)" ) def __init__(self): self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms] self.browsers = [ (b, re.compile(self._browser_version_re % a, re.I)) for a, b in self.browsers ] def __call__(self, user_agent): for platform, regex in self.platforms: # noqa: B007 match = regex.search(user_agent) if match is not None: break else: platform = None for browser, regex in self.browsers: # noqa: B007 match = regex.search(user_agent) if match is not None: version = match.group(1) break else: browser = version = None match = self._language_re.search(user_agent) if match is not None: language = match.group(1) or match.group(2) else: language = None return platform, browser, version, language class UserAgent(object): """Represents a user agent. Pass it a WSGI environment or a user agent string and you can inspect some of the details from the user agent string via the attributes. The following attributes exist: .. attribute:: string the raw user agent string .. attribute:: platform the browser platform. ``None`` if not recognized. The following platforms are currently recognized: - `aix` - `amiga` - `android` - `blackberry` - `bsd` - `chromeos` - `dragonflybsd` - `freebsd` - `hpux` - `ipad` - `iphone` - `irix` - `linux` - `macos` - `netbsd` - `openbsd` - `sco` - `solaris` - `symbian` - `wii` - `windows` .. attribute:: browser the name of the browser. ``None`` if not recognized. The following browsers are currently recognized: - `aol` * - `ask` * - `baidu` * - `bing` * - `camino` - `chrome` - `edge` - `firefox` - `galeon` - `google` * - `kmeleon` - `konqueror` - `links` - `lynx` - `mozilla` - `msie` - `msn` - `netscape` - `opera` - `safari` - `seamonkey` - `webkit` - `yahoo` * (Browsers marked with a star (``*``) are crawlers.) .. attribute:: version the version of the browser. 
``None`` if not recognized. .. attribute:: language the language of the browser. ``None`` if not recognized. """ _parser = UserAgentParser() def __init__(self, environ_or_string): if isinstance(environ_or_string, dict): environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "") self.string = environ_or_string self.platform, self.browser, self.version, self.language = self._parser( environ_or_string ) def to_header(self): return self.string def __str__(self): return self.string def __nonzero__(self): return bool(self.browser) __bool__ = __nonzero__ def __repr__(self): return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
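

# A minimal sketch of the parser above.  The user agent string is an
# illustrative assumption (a desktop Chrome build); real strings vary widely
# and unrecognized ones simply yield ``None`` attributes.
if __name__ == "__main__":
    ua = UserAgent(
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    )
    assert ua.platform == "windows"
    assert ua.browser == "chrome"
    assert ua.version == "79.0.3945.130"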
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/exceptions.py
# -*- coding: utf-8 -*-
"""
    werkzeug.exceptions
    ~~~~~~~~~~~~~~~~~~~

    This module implements a number of Python exceptions you can raise from
    within your views to trigger a standard non-200 response.

    Usage Example
    -------------

    ::

        from werkzeug.wrappers import BaseRequest
        from werkzeug.wsgi import responder
        from werkzeug.exceptions import HTTPException, NotFound

        def view(request):
            raise NotFound()

        @responder
        def application(environ, start_response):
            request = BaseRequest(environ)
            try:
                return view(request)
            except HTTPException as e:
                return e

    As you can see from this example those exceptions are callable WSGI
    applications.  Because of Python 2.4 compatibility those do not extend
    from the response objects but only from the python exception class.

    As a matter of fact they are not Werkzeug response objects.  However you
    can get a response object by calling ``get_response()`` on an HTTP
    exception.

    Keep in mind that you have to pass an environment to ``get_response()``
    because some errors fetch additional information from the WSGI
    environment.

    If you want to hook in a different exception page to, say, a 404 status
    code, you can add a second except for a specific subclass of an error::

        @responder
        def application(environ, start_response):
            request = BaseRequest(environ)
            try:
                return view(request)
            except NotFound:
                return not_found(request)
            except HTTPException as e:
                return e

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import sys
from datetime import datetime

from ._compat import implements_to_string
from ._compat import integer_types
from ._compat import iteritems
from ._compat import text_type
from ._internal import _get_environ
from .utils import escape


@implements_to_string
class HTTPException(Exception):
    """Baseclass for all HTTP exceptions.  This exception can be called as a
    WSGI application to render a default error page or you can catch the
    subclasses of it independently and render nicer error messages.
    """

    code = None
    description = None

    def __init__(self, description=None, response=None):
        super(HTTPException, self).__init__()
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """Create an exception that is a subclass of the calling HTTP
        exception and the ``exception`` argument.

        The first argument to the class will be passed to the
        wrapped ``exception``, the rest to the HTTP exception. If
        ``e.args`` is not empty and ``e.show_exception`` is ``True``,
        the wrapped exception message is added to the HTTP error
        description.

        .. versionchanged:: 0.15.5
            The ``show_exception`` attribute controls whether the
            description includes the wrapped exception message.

        .. versionchanged:: 0.15.0
            The description includes the wrapped exception message.
""" class newcls(cls, exception): _description = cls.description show_exception = False def __init__(self, arg=None, *args, **kwargs): super(cls, self).__init__(*args, **kwargs) if arg is None: exception.__init__(self) else: exception.__init__(self, arg) @property def description(self): if self.show_exception: return "{}\n{}: {}".format( self._description, exception.__name__, exception.__str__(self) ) return self._description @description.setter def description(self, value): self._description = value newcls.__module__ = sys._getframe(1).f_globals.get("__name__") name = name or cls.__name__ + exception.__name__ newcls.__name__ = newcls.__qualname__ = name return newcls @property def name(self): """The status name.""" from .http import HTTP_STATUS_CODES return HTTP_STATUS_CODES.get(self.code, "Unknown Error") def get_description(self, environ=None): """Get the description.""" return u"<p>%s</p>" % escape(self.description).replace("\n", "<br>") def get_body(self, environ=None): """Get the HTML body.""" return text_type( ( u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' u"<title>%(code)s %(name)s</title>\n" u"<h1>%(name)s</h1>\n" u"%(description)s\n" ) % { "code": self.code, "name": escape(self.name), "description": self.get_description(environ), } ) def get_headers(self, environ=None): """Get a list of headers.""" return [("Content-Type", "text/html; charset=utf-8")] def get_response(self, environ=None): """Get a response object. If one was passed to the exception it's returned directly. :param environ: the optional environ for the request. This can be used to modify the response depending on how the request looked like. :return: a :class:`Response` object or a subclass thereof. """ from .wrappers.response import Response if self.response is not None: return self.response if environ is not None: environ = _get_environ(environ) headers = self.get_headers(environ) return Response(self.get_body(environ), self.code, headers) def __call__(self, environ, start_response): """Call the exception as WSGI application. :param environ: the WSGI environment. :param start_response: the response callable provided by the WSGI server. """ response = self.get_response(environ) return response(environ, start_response) def __str__(self): code = self.code if self.code is not None else "???" return "%s %s: %s" % (code, self.name, self.description) def __repr__(self): code = self.code if self.code is not None else "???" return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name) class BadRequest(HTTPException): """*400* `Bad Request` Raise if the browser sends something to the application the application or server cannot handle. """ code = 400 description = ( "The browser (or proxy) sent a request that this server could " "not understand." ) class ClientDisconnected(BadRequest): """Internal exception that is raised if Werkzeug detects a disconnected client. Since the client is already gone at that point attempting to send the error message to the client might not work and might ultimately result in another exception in the server. Mainly this is here so that it is silenced by default as far as Werkzeug is concerned. Since disconnections cannot be reliably detected and are unspecified by WSGI to a large extent this might or might not be raised if a client is gone. .. versionadded:: 0.8 """ class SecurityError(BadRequest): """Raised if something triggers a security error. This is otherwise exactly like a bad request error. .. 
versionadded:: 0.9 """ class BadHost(BadRequest): """Raised if the submitted host is badly formatted. .. versionadded:: 0.11.2 """ class Unauthorized(HTTPException): """*401* ``Unauthorized`` Raise if the user is not authorized to access a resource. The ``www_authenticate`` argument should be used to set the ``WWW-Authenticate`` header. This is used for HTTP basic auth and other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate` to create correctly formatted values. Strictly speaking, a 401 response is invalid if it doesn't provide at least one value for this header, although real clients typically don't care. :param description: Override the default message used for the body of the response. :param www_authenticate: A single value, or list of values, for the WWW-Authenticate header. .. versionchanged:: 0.15.3 If the ``www_authenticate`` argument is not set, the ``WWW-Authenticate`` header is not set. .. versionchanged:: 0.15.3 The ``response`` argument was restored. .. versionchanged:: 0.15.1 ``description`` was moved back as the first argument, restoring its previous position. .. versionchanged:: 0.15.0 ``www_authenticate`` was added as the first argument, ahead of ``description``. """ code = 401 description = ( "The server could not verify that you are authorized to access" " the URL requested. You either supplied the wrong credentials" " (e.g. a bad password), or your browser doesn't understand" " how to supply the credentials required." ) def __init__(self, description=None, response=None, www_authenticate=None): HTTPException.__init__(self, description, response) if www_authenticate is not None: if not isinstance(www_authenticate, (tuple, list)): www_authenticate = (www_authenticate,) self.www_authenticate = www_authenticate def get_headers(self, environ=None): headers = HTTPException.get_headers(self, environ) if self.www_authenticate: headers.append( ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate])) ) return headers class Forbidden(HTTPException): """*403* `Forbidden` Raise if the user doesn't have the permission for the requested resource but was authenticated. """ code = 403 description = ( "You don't have the permission to access the requested" " resource. It is either read-protected or not readable by the" " server." ) class NotFound(HTTPException): """*404* `Not Found` Raise if a resource does not exist and never existed. """ code = 404 description = ( "The requested URL was not found on the server. If you entered" " the URL manually, please check your spelling and try again." ) class MethodNotAllowed(HTTPException): """*405* `Method Not Allowed` Raise if the client used a method the resource does not handle. For example `POST` if the resource is view only. Especially useful for REST. The first argument for this exception should be a list of allowed methods. Strictly speaking, the response would be invalid if you don't provide valid methods in the header, which you can do with that list. """ code = 405 description = "The method is not allowed for the requested URL."
def __init__(self, valid_methods=None, description=None): """Takes an optional list of valid HTTP methods; starting with Werkzeug 0.3 the list will be mandatory.""" HTTPException.__init__(self, description) self.valid_methods = valid_methods def get_headers(self, environ=None): headers = HTTPException.get_headers(self, environ) if self.valid_methods: headers.append(("Allow", ", ".join(self.valid_methods))) return headers class NotAcceptable(HTTPException): """*406* `Not Acceptable` Raise if the server can't return any content conforming to the `Accept` headers of the client. """ code = 406 description = ( "The resource identified by the request is only capable of" " generating response entities which have content" " characteristics not acceptable according to the accept" " headers sent in the request." ) class RequestTimeout(HTTPException): """*408* `Request Timeout` Raise to signal a timeout. """ code = 408 description = ( "The server closed the network connection because the browser" " didn't finish the request within the specified time." ) class Conflict(HTTPException): """*409* `Conflict` Raise to signal that a request cannot be completed because it conflicts with the current state on the server. .. versionadded:: 0.7 """ code = 409 description = ( "A conflict happened while processing the request. The" " resource might have been modified while the request was being" " processed." ) class Gone(HTTPException): """*410* `Gone` Raise if a resource existed previously and went away without a new location. """ code = 410 description = ( "The requested URL is no longer available on this server and" " there is no forwarding address. If you followed a link from a" " foreign page, please contact the author of this page." ) class LengthRequired(HTTPException): """*411* `Length Required` Raise if the browser submitted data but no ``Content-Length`` header, which is required for the kind of processing the server does. """ code = 411 description = ( "A request with this method requires a valid <code>Content-" "Length</code> header." ) class PreconditionFailed(HTTPException): """*412* `Precondition Failed` Status code used in combination with ``If-Match``, ``If-None-Match``, or ``If-Unmodified-Since``. """ code = 412 description = ( "The precondition on the request for the URL failed positive evaluation." ) class RequestEntityTooLarge(HTTPException): """*413* `Request Entity Too Large` The status code one should return if the data submitted exceeded a given limit. """ code = 413 description = "The data value transmitted exceeds the capacity limit." class RequestURITooLarge(HTTPException): """*414* `Request URI Too Large` Like *413* but for URLs that are too long. """ code = 414 description = ( "The length of the requested URL exceeds the capacity limit for" " this server. The request cannot be processed." ) class UnsupportedMediaType(HTTPException): """*415* `Unsupported Media Type` The status code returned if the server is unable to handle the media type the client transmitted. """ code = 415 description = ( "The server does not support the media type transmitted in the request." ) class RequestedRangeNotSatisfiable(HTTPException): """*416* `Requested Range Not Satisfiable` The client asked for an invalid part of the file. .. versionadded:: 0.7 """ code = 416 description = "The server cannot provide the requested range." def __init__(self, length=None, units="bytes", description=None): """Takes an optional `Content-Range` header value based on the ``length`` parameter.
""" HTTPException.__init__(self, description) self.length = length self.units = units def get_headers(self, environ=None): headers = HTTPException.get_headers(self, environ) if self.length is not None: headers.append(("Content-Range", "%s */%d" % (self.units, self.length))) return headers class ExpectationFailed(HTTPException): """*417* `Expectation Failed` The server cannot meet the requirements of the Expect request-header. .. versionadded:: 0.7 """ code = 417 description = "The server could not meet the requirements of the Expect header" class ImATeapot(HTTPException): """*418* `I'm a teapot` The server should return this if it is a teapot and someone attempted to brew coffee with it. .. versionadded:: 0.7 """ code = 418 description = "This server is a teapot, not a coffee machine" class UnprocessableEntity(HTTPException): """*422* `Unprocessable Entity` Used if the request is well formed, but the instructions are otherwise incorrect. """ code = 422 description = ( "The request was well-formed but was unable to be followed due" " to semantic errors." ) class Locked(HTTPException): """*423* `Locked` Used if the resource that is being accessed is locked. """ code = 423 description = "The resource that is being accessed is locked." class FailedDependency(HTTPException): """*424* `Failed Dependency` Used if the method could not be performed on the resource because the requested action depended on another action and that action failed. """ code = 424 description = ( "The method could not be performed on the resource because the" " requested action depended on another action and that action" " failed." ) class PreconditionRequired(HTTPException): """*428* `Precondition Required` The server requires this request to be conditional, typically to prevent the lost update problem, which is a race condition between two or more clients attempting to update a resource through PUT or DELETE. By requiring each client to include a conditional header ("If-Match" or "If-Unmodified- Since") with the proper value retained from a recent GET request, the server ensures that each client has at least seen the previous revision of the resource. """ code = 428 description = ( "This request is required to be conditional; try using" ' "If-Match" or "If-Unmodified-Since".' ) class _RetryAfter(HTTPException): """Adds an optional ``retry_after`` parameter which will set the ``Retry-After`` header. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. """ def __init__(self, description=None, response=None, retry_after=None): super(_RetryAfter, self).__init__(description, response) self.retry_after = retry_after def get_headers(self, environ=None): headers = super(_RetryAfter, self).get_headers(environ) if self.retry_after: if isinstance(self.retry_after, datetime): from .http import http_date value = http_date(self.retry_after) else: value = str(self.retry_after) headers.append(("Retry-After", value)) return headers class TooManyRequests(_RetryAfter): """*429* `Too Many Requests` The server is limiting the rate at which this user receives responses, and this request exceeds that rate. (The server may use any convenient method to identify users and their request rates). The server may include a "Retry-After" header to indicate how long the user should wait before retrying. :param retry_after: If given, set the ``Retry-After`` header to this value. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. .. versionchanged:: 1.0 Added ``retry_after`` parameter. 
""" code = 429 description = "This user has exceeded an allotted request count. Try again later." class RequestHeaderFieldsTooLarge(HTTPException): """*431* `Request Header Fields Too Large` The server refuses to process the request because the header fields are too large. One or more individual fields may be too large, or the set of all headers is too large. """ code = 431 description = "One or more header fields exceeds the maximum size." class UnavailableForLegalReasons(HTTPException): """*451* `Unavailable For Legal Reasons` This status code indicates that the server is denying access to the resource as a consequence of a legal demand. """ code = 451 description = "Unavailable for legal reasons." class InternalServerError(HTTPException): """*500* `Internal Server Error` Raise if an internal server error occurred. This is a good fallback if an unknown error occurred in the dispatcher. .. versionchanged:: 1.0.0 Added the :attr:`original_exception` attribute. """ code = 500 description = ( "The server encountered an internal error and was unable to" " complete your request. Either the server is overloaded or" " there is an error in the application." ) def __init__(self, description=None, response=None, original_exception=None): #: The original exception that caused this 500 error. Can be #: used by frameworks to provide context when handling #: unexpected errors. self.original_exception = original_exception super(InternalServerError, self).__init__( description=description, response=response ) class NotImplemented(HTTPException): """*501* `Not Implemented` Raise if the application does not support the action requested by the browser. """ code = 501 description = "The server does not support the action requested by the browser." class BadGateway(HTTPException): """*502* `Bad Gateway` If you do proxying in your application you should return this status code if you received an invalid response from the upstream server it accessed in attempting to fulfill the request. """ code = 502 description = ( "The proxy server received an invalid response from an upstream server." ) class ServiceUnavailable(_RetryAfter): """*503* `Service Unavailable` Status code you should return if a service is temporarily unavailable. :param retry_after: If given, set the ``Retry-After`` header to this value. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. .. versionchanged:: 1.0 Added ``retry_after`` parameter. """ code = 503 description = ( "The server is temporarily unable to service your request due" " to maintenance downtime or capacity problems. Please try" " again later." ) class GatewayTimeout(HTTPException): """*504* `Gateway Timeout` Status code you should return if a connection to an upstream server times out. """ code = 504 description = "The connection to an upstream server timed out." class HTTPVersionNotSupported(HTTPException): """*505* `HTTP Version Not Supported` The server does not support the HTTP protocol version used in the request. """ code = 505 description = ( "The server does not support the HTTP protocol version used in the request." 
) default_exceptions = {} __all__ = ["HTTPException"] def _find_exceptions(): for _name, obj in iteritems(globals()): try: is_http_exception = issubclass(obj, HTTPException) except TypeError: is_http_exception = False if not is_http_exception or obj.code is None: continue __all__.append(obj.__name__) old_obj = default_exceptions.get(obj.code, None) if old_obj is not None and issubclass(obj, old_obj): continue default_exceptions[obj.code] = obj _find_exceptions() del _find_exceptions class Aborter(object): """When passed a dict of code -> exception items it can be used as a callable that raises exceptions. If the first argument to the callable is an integer it will be looked up in the mapping; if it's a WSGI application it will be raised in a proxy exception. The rest of the arguments are forwarded to the exception constructor. """ def __init__(self, mapping=None, extra=None): if mapping is None: mapping = default_exceptions self.mapping = dict(mapping) if extra is not None: self.mapping.update(extra) def __call__(self, code, *args, **kwargs): if not args and not kwargs and not isinstance(code, integer_types): raise HTTPException(response=code) if code not in self.mapping: raise LookupError("no exception for %r" % code) raise self.mapping[code](*args, **kwargs) def abort(status, *args, **kwargs): """Raises an :py:exc:`HTTPException` for the given status code or WSGI application. If a status code is given, it will be looked up in the list of exceptions and will raise that exception. If passed a WSGI application, it will wrap it in a proxy WSGI exception and raise that:: abort(404) # 404 Not Found abort(Response('Hello World')) """ return _aborter(status, *args, **kwargs) _aborter = Aborter() #: An exception that is used to signal both a :exc:`KeyError` and a #: :exc:`BadRequest`. Used by many of the datastructures. BadRequestKeyError = BadRequest.wrap(KeyError)
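# --------------------------------------------------------------------------
# Editor's note: a short sketch of how the pieces above fit together. It
# imports only public names from this module plus ``werkzeug.test``
# (assumed available alongside this package); printed values are indicative.
if __name__ == "__main__":
    from werkzeug.exceptions import HTTPException, BadRequestKeyError, abort
    from werkzeug.test import EnvironBuilder

    environ = EnvironBuilder(path="/missing").get_environ()

    try:
        abort(404)  # looks up NotFound in default_exceptions and raises it
    except HTTPException as e:
        # Exceptions double as responses: render one via get_response().
        print(e.get_response(environ).status)  # e.g. '404 NOT FOUND'

    # BadRequestKeyError is both a KeyError and a 400 Bad Request.
    err = BadRequestKeyError("token")
    print(isinstance(err, KeyError), err.code)  # True 400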
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/_compat.py
# flake8: noqa # This whole file is full of lint errors import functools import operator import sys try: import builtins except ImportError: import __builtin__ as builtins PY2 = sys.version_info[0] == 2 WIN = sys.platform.startswith("win") _identity = lambda x: x if PY2: unichr = unichr text_type = unicode string_types = (str, unicode) integer_types = (int, long) iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs) itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs) iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs) iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs) iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs) int_to_byte = chr iter_bytes = iter import collections as collections_abc exec("def reraise(tp, value, tb=None):\n raise tp, value, tb") def fix_tuple_repr(obj): def __repr__(self): cls = self.__class__ return "%s(%s)" % ( cls.__name__, ", ".join( "%s=%r" % (field, self[index]) for index, field in enumerate(cls._fields) ), ) obj.__repr__ = __repr__ return obj def implements_iterator(cls): cls.next = cls.__next__ del cls.__next__ return cls def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode("utf-8") return cls def native_string_result(func): def wrapper(*args, **kwargs): return func(*args, **kwargs).encode("utf-8") return functools.update_wrapper(wrapper, func) def implements_bool(cls): cls.__nonzero__ = cls.__bool__ del cls.__bool__ return cls from itertools import imap, izip, ifilter range_type = xrange from StringIO import StringIO from cStringIO import StringIO as BytesIO NativeStringIO = BytesIO def make_literal_wrapper(reference): return _identity def normalize_string_tuple(tup): """Normalizes a string tuple to a common type. Following Python 2 rules, upgrades to unicode are implicit. """ if any(isinstance(x, text_type) for x in tup): return tuple(to_unicode(x) for x in tup) return tup def try_coerce_native(s): """Try to coerce a unicode string to native if possible. Otherwise, leave it as unicode. 
""" try: return to_native(s) except UnicodeError: return s wsgi_get_bytes = _identity def wsgi_decoding_dance(s, charset="utf-8", errors="replace"): return s.decode(charset, errors) def wsgi_encoding_dance(s, charset="utf-8", errors="replace"): if isinstance(s, bytes): return s return s.encode(charset, errors) def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"): if x is None: return None if isinstance(x, (bytes, bytearray, buffer)): return bytes(x) if isinstance(x, unicode): return x.encode(charset, errors) raise TypeError("Expected bytes") def to_native(x, charset=sys.getdefaultencoding(), errors="strict"): if x is None or isinstance(x, str): return x return x.encode(charset, errors) else: unichr = chr text_type = str string_types = (str,) integer_types = (int,) iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs)) itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs)) iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs)) iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs)) iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs)) int_to_byte = operator.methodcaller("to_bytes", 1, "big") iter_bytes = functools.partial(map, int_to_byte) import collections.abc as collections_abc def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value fix_tuple_repr = _identity implements_iterator = _identity implements_to_string = _identity implements_bool = _identity native_string_result = _identity imap = map izip = zip ifilter = filter range_type = range from io import StringIO, BytesIO NativeStringIO = StringIO _latin1_encode = operator.methodcaller("encode", "latin1") def make_literal_wrapper(reference): if isinstance(reference, text_type): return _identity return _latin1_encode def normalize_string_tuple(tup): """Ensures that all types in the tuple are either strings or bytes. """ tupiter = iter(tup) is_text = isinstance(next(tupiter, None), text_type) for arg in tupiter: if isinstance(arg, text_type) != is_text: raise TypeError( "Cannot mix str and bytes arguments (got %s)" % repr(tup) ) return tup try_coerce_native = _identity wsgi_get_bytes = _latin1_encode def wsgi_decoding_dance(s, charset="utf-8", errors="replace"): return s.encode("latin1").decode(charset, errors) def wsgi_encoding_dance(s, charset="utf-8", errors="replace"): if isinstance(s, text_type): s = s.encode(charset) return s.decode("latin1", errors) def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"): if x is None: return None if isinstance(x, (bytes, bytearray, memoryview)): # noqa return bytes(x) if isinstance(x, str): return x.encode(charset, errors) raise TypeError("Expected bytes") def to_native(x, charset=sys.getdefaultencoding(), errors="strict"): if x is None or isinstance(x, str): return x return x.decode(charset, errors) def to_unicode( x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False ): if x is None: return None if not isinstance(x, bytes): return text_type(x) if charset is None and allow_none_charset: return x return x.decode(charset, errors) try: from os import fspath except ImportError: # Python < 3.6 # https://www.python.org/dev/peps/pep-0519/#backwards-compatibility def fspath(path): return path.__fspath__() if hasattr(path, "__fspath__") else path
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/datastructures.py
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: 2007 Pallets :license: BSD-3-Clause """ import codecs import mimetypes import re from copy import deepcopy from itertools import repeat from . import exceptions from ._compat import BytesIO from ._compat import collections_abc from ._compat import fspath from ._compat import integer_types from ._compat import iteritems from ._compat import iterkeys from ._compat import iterlists from ._compat import itervalues from ._compat import make_literal_wrapper from ._compat import PY2 from ._compat import string_types from ._compat import text_type from ._compat import to_native from ._internal import _missing from .filesystem import get_filesystem_encoding def is_immutable(self): raise TypeError("%r objects are immutable" % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in iteritems(mapping, multi=True): yield item elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): for v in value: yield key, v else: yield key, value else: for item in mapping: yield item def native_itermethods(names): if not PY2: return lambda x: x def setviewmethod(cls, name): viewmethod_name = "view%s" % name repr_name = "view_%s" % name def viewmethod(self, *a, **kw): return ViewItems(self, name, repr_name, *a, **kw) viewmethod.__name__ = viewmethod_name viewmethod.__doc__ = "`%s()` object providing a view on %s" % ( viewmethod_name, name, ) setattr(cls, viewmethod_name, viewmethod) def setitermethod(cls, name): itermethod = getattr(cls, name) setattr(cls, "iter%s" % name, itermethod) def listmethod(self, *a, **kw): return list(itermethod(self, *a, **kw)) listmethod.__name__ = name listmethod.__doc__ = "Like :py:meth:`iter%s`, but returns a list." % name setattr(cls, name, listmethod) def wrap(cls): for name in names: setitermethod(cls, name) setviewmethod(cls, name) return cls return wrap class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ def __repr__(self): return "%s(%s)" % (self.__class__.__name__, list.__repr__(self)) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. 
versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return iteritems(self) def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def _iter_hashitems(self): return iteritems(self, multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. versionadded:: 0.5 :private: """ on_update = None def calls_update(name): # noqa: B902 def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall def setdefault(self, key, default=None): modified = key not in self rv = super(UpdateDictMixin, self).setdefault(key, default) if modified and self.on_update is not None: self.on_update(self) return rv def pop(self, key, default=_missing): modified = key in self if default is _missing: rv = super(UpdateDictMixin, self).pop(key) else: rv = super(UpdateDictMixin, self).pop(key, default) if modified and self.on_update is not None: self.on_update(self) return rv __setitem__ = calls_update("__setitem__") __delitem__ = calls_update("__delitem__") clear = calls_update("clear") popitem = calls_update("popitem") update = calls_update("update") del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. 
""" try: rv = self[key] except KeyError: return default if type is not None: try: rv = type(rv) except ValueError: rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self class ViewItems(object): def __init__(self, multi_dict, method, repr_name, *a, **kw): self.__multi_dict = multi_dict self.__method = method self.__repr_name = repr_name self.__a = a self.__kw = kw def __get_items(self): return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw) def __repr__(self): return "%s(%r)" % (self.__repr_name, list(self.__get_items())) def __iter__(self): return iter(self.__get_items()) @native_itermethods(["keys", "values", "items", "lists", "listvalues"]) class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. :param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. """ def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) elif isinstance(mapping, dict): tmp = {} for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): if len(value) == 0: continue value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: lst = dict.__getitem__(self, key) if len(lst) > 0: return lst[0] raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. 
:param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default_list: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. :return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return an iterator of ``(key, value)`` pairs. :param multi: If set to `True` the iterator returned will have a pair for each value of each key. Otherwise it will only contain pairs for the first value of each key. """ for key, values in iteritems(dict, self): if multi: for value in values: yield key, value else: yield key, values[0] def lists(self): """Return a iterator of ``(key, values)`` pairs, where values is the list of all values associated with the key.""" for key, values in iteritems(dict, self): yield key, list(values) def keys(self): return iterkeys(dict, self) __iter__ = keys def values(self): """Returns an iterator of the first value on every key's value list.""" for values in itervalues(dict, self): yield values[0] def listvalues(self): """Return an iterator of all values associated with a key. 
Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True """ return itervalues(dict, self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def deepcopy(self, memo=None): """Return a deep copy of this object.""" return self.__class__(deepcopy(self.to_dict(flat=False), memo)) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. :return: a :class:`dict` """ if flat: return dict(iteritems(self)) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists: >>> a = MultiDict({'x': 1}) >>> b = MultiDict({'x': 2, 'y': 3}) >>> a.update(b) >>> a MultiDict([('y', 3), ('x', 1), ('x', 2)]) If the value list for a key in ``other_dict`` is empty, no new values will be added to the dict and the key will not be created: >>> x = {'empty_list': []} >>> y = MultiDict() >>> y.update(x) >>> y MultiDict([]) """ for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: lst = dict.pop(self, key) if len(lst) == 0: raise exceptions.BadRequestKeyError(key) return lst[0] except KeyError: if default is not _missing: return default raise exceptions.BadRequestKeyError(key) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) if len(item[1]) == 0: raise exceptions.BadRequestKeyError(item) return (item[0], item[1][0]) except KeyError as e: raise exceptions.BadRequestKeyError(e.args[0]) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. """ return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(e.args[0]) def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.deepcopy(memo=memo) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, list(iteritems(self, multi=True))) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). 
""" __slots__ = ("prev", "key", "value", "next") def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev @native_itermethods(["keys", "values", "items", "lists", "listvalues"]) class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. """ def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = iteritems(self, multi=True) iter2 = iteritems(other, multi=True) try: for k1, v1 in iter1: k2, v2 = next(iter2) if k1 != k2 or v1 != v2: return False except StopIteration: return False try: next(iter2) except StopIteration: return True return False if len(self) != len(other): return False for key, values in iterlists(self): if other.getlist(key) != values: return False return True __hash__ = None def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def __getstate__(self): return list(iteritems(self, multi=True)) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def keys(self): return (key for key, value in iteritems(self)) __iter__ = keys def values(self): return (value for key, value in iteritems(self)) def items(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def lists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def listvalues(self): for _key, values in iterlists(self): yield values def add(self, key, value): dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, 
value) def setlistdefault(self, key, default_list=None): raise TypeError("setlistdefault is unsupported for ordered multi dicts") def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError: if default is not _missing: return default raise exceptions.BadRequestKeyError(key) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(e.args[0]) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(e.args[0]) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header( value, dict((k.replace("_", "-"), v) for k, v in kw.items()) ) def _unicodify_header_value(value): if isinstance(value, bytes): value = value.decode("latin-1") if not isinstance(value, text_type): value = text_type(value) return value @native_itermethods(["keys", "values", "items"]) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. :param defaults: The list of default values for the :class:`Headers`. .. versionchanged:: 0.9 This data structure now stores unicode values similar to how the multi dicts do it. The main difference is that bytes can be set as well which will automatically be latin1 decoded. .. versionchanged:: 0.9 The :meth:`linked` function was removed without replacement as it was an API that does not support the changes to the encoding model. """ def __init__(self, defaults=None): self._list = [] if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, integer_types): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) if not isinstance(key, string_types): raise exceptions.BadRequestKeyError(key) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. 
if _get_mode: raise KeyError() raise exceptions.BadRequestKeyError(key) def __eq__(self, other): def lowered(item): return (item[0].lower(),) + item[1:] return other.__class__ is self.__class__ and set( map(lowered, other._list) ) == set(map(lowered, self._list)) __hash__ = None def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None, as_bytes=False): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the default value is returned. :param as_bytes: return bytes instead of unicode strings. """ try: rv = self.__getitem__(key, _get_mode=True) except KeyError: return default if as_bytes: rv = rv.encode("latin1") if type is None: return rv try: return type(rv) except ValueError: return default def getlist(self, key, type=None, as_bytes=False): """Return the list of items for a given key. If that key is not in the :class:`Headers`, the return value will be an empty list. Just as :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will be converted with the callable defined there. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. :param as_bytes: return bytes instead of unicode strings. """ ikey = key.lower() result = [] for k, v in self: if k.lower() == ikey: if as_bytes: v = v.encode("latin1") if type is not None: try: v = type(v) except ValueError: continue result.append(v) return result def get_all(self, name): """Return a list of all the values for the named field. This method is compatible with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.get_all` method. """ return self.getlist(name) def items(self, lower=False): for key, value in self: if lower: key = key.lower() yield key, value def keys(self, lower=False): for key, _ in iteritems(self, lower): yield key def values(self): for _, value in iteritems(self): yield value def extend(self, *args, **kwargs): """Extend headers in this object with items from another object containing header items as well as keyword arguments. To replace existing keys instead of extending, use :meth:`update` instead. If provided, the first argument can be another :class:`Headers` object, a :class:`MultiDict`, :class:`dict`, or iterable of pairs. .. versionchanged:: 1.0 Support :class:`MultiDict`. Allow passing ``kwargs``. 
""" if len(args) > 1: raise TypeError("update expected at most 1 arguments, got %d" % len(args)) if args: for key, value in iter_multi_items(args[0]): self.add(key, value) for key, value in iter_multi_items(kwargs): self.add(key, value) def __delitem__(self, key, _index_operation=True): if _index_operation and isinstance(key, (integer_types, slice)): del self._list[key] return key = key.lower() new = [] for k, v in self._list: if k.lower() != key: new.append((k, v)) self._list[:] = new def remove(self, key): """Remove a key. :param key: The key to be removed. """ return self.__delitem__(key, _index_operation=False) def pop(self, key=None, default=_missing): """Removes and returns a key or index. :param key: The key to be popped. If this is an integer the item at that position is removed, if it's a string the value for that key is. If the key is omitted or `None` the last item is removed. :return: an item. """ if key is None: return self._list.pop() if isinstance(key, integer_types): return self._list.pop(key) try: rv = self[key] self.remove(key) except KeyError: if default is not _missing: return default raise return rv def popitem(self): """Removes a key or index and returns a (key, value) item.""" return self.pop() def __contains__(self, key): """Check if a key is present.""" try: self.__getitem__(key, _get_mode=True) except KeyError: return False return True has_key = __contains__ def __iter__(self): """Yield ``(key, value)`` tuples.""" return iter(self._list) def __len__(self): return len(self._list) def add(self, _key, _value, **kw): """Add a new header tuple to the list. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes:: >>> d = Headers() >>> d.add('Content-Type', 'text/plain') >>> d.add('Content-Disposition', 'attachment', filename='foo.png') The keyword argument dumping uses :func:`dump_options_header` behind the scenes. .. versionadded:: 0.4.1 keyword arguments were added for :mod:`wsgiref` compatibility. """ if kw: _value = _options_header_vkw(_value, kw) _key = _unicodify_header_value(_key) _value = _unicodify_header_value(_value) self._validate_value(_value) self._list.append((_key, _value)) def _validate_value(self, value): if not isinstance(value, text_type): raise TypeError("Value should be unicode.") if u"\n" in value or u"\r" in value: raise ValueError( "Detected newline in header value. This is " "a potential security problem" ) def add_header(self, _key, _value, **_kw): """Add a new header tuple to the list. An alias for :meth:`add` for compatibility with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.add_header` method. """ self.add(_key, _value, **_kw) def clear(self): """Clears all headers.""" del self._list[:] def set(self, _key, _value, **kw): """Remove all header tuples for `key` and add a new one. The newly added key either appears at the end of the list if there was no entry or replaces the first one. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes. See :meth:`add` for more information. .. versionchanged:: 0.6.1 :meth:`set` now accepts the same arguments as :meth:`add`. :param key: The key to be inserted. :param value: The value to be inserted. 
""" if kw: _value = _options_header_vkw(_value, kw) _key = _unicodify_header_value(_key) _value = _unicodify_header_value(_value) self._validate_value(_value) if not self._list: self._list.append((_key, _value)) return listiter = iter(self._list) ikey = _key.lower() for idx, (old_key, _old_value) in enumerate(listiter): if old_key.lower() == ikey: # replace first occurrence self._list[idx] = (_key, _value) break else: self._list.append((_key, _value)) return self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey] def setlist(self, key, values): """Remove any existing values for a header and add new ones. :param key: The header key to set. :param values: An iterable of values to set for the key. .. versionadded:: 1.0 """ if values: values_iter = iter(values) self.set(key, next(values_iter)) for value in values_iter: self.add(key, value) else: self.remove(key) def setdefault(self, key, default): """Return the first value for the key if it is in the headers, otherwise set the header to the value given by ``default`` and return that. :param key: The header key to get. :param default: The value to set for the key if it is not in the headers. """ if key in self: return self[key] self.set(key, default) return default def setlistdefault(self, key, default): """Return the list of values for the key if it is in the headers, otherwise set the header to the list of values given by ``default`` and return that. Unlike :meth:`MultiDict.setlistdefault`, modifying the returned list will not affect the headers. :param key: The header key to get. :param default: An iterable of values to set for the key if it is not in the headers. .. versionadded:: 1.0 """ if key not in self: self.setlist(key, default) return self.getlist(key) def __setitem__(self, key, value): """Like :meth:`set` but also supports index/slice based setting.""" if isinstance(key, (slice, integer_types)): if isinstance(key, integer_types): value = [value] value = [ (_unicodify_header_value(k), _unicodify_header_value(v)) for (k, v) in value ] [self._validate_value(v) for (k, v) in value] if isinstance(key, integer_types): self._list[key] = value[0] else: self._list[key] = value else: self.set(key, value) def update(self, *args, **kwargs): """Replace headers in this object with items from another headers object and keyword arguments. To extend existing keys instead of replacing, use :meth:`extend` instead. If provided, the first argument can be another :class:`Headers` object, a :class:`MultiDict`, :class:`dict`, or iterable of pairs. .. versionadded:: 1.0 """ if len(args) > 1: raise TypeError("update expected at most 1 arguments, got %d" % len(args)) if args: mapping = args[0] if isinstance(mapping, (Headers, MultiDict)): for key in mapping.keys(): self.setlist(key, mapping.getlist(key)) elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (list, tuple)): self.setlist(key, value) else: self.set(key, value) else: for key, value in mapping: self.set(key, value) for key, value in iteritems(kwargs): if isinstance(value, (list, tuple)): self.setlist(key, value) else: self.set(key, value) def to_wsgi_list(self): """Convert the headers into a list suitable for WSGI. The values are byte strings in Python 2 converted to latin1 and unicode strings in Python 3 for the WSGI server to encode. 
:return: list """ if PY2: return [(to_native(k), v.encode("latin1")) for k, v in self] return list(self) def copy(self): return self.__class__(self._list) def __copy__(self): return self.copy() def __str__(self): """Returns formatted headers suitable for HTTP transmission.""" strs = [] for key, value in self.to_wsgi_list(): strs.append("%s: %s" % (key, value)) strs.append("\r\n") return "\r\n".join(strs) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, list(self)) class ImmutableHeadersMixin(object): """Makes a :class:`Headers` immutable. We do not mark them as hashable though since the only usecase for this datastructure in Werkzeug is a view on a mutable structure. .. versionadded:: 0.5 :private: """ def __delitem__(self, key, **kwargs): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def set(self, key, value): is_immutable(self) def setlist(self, key, value): is_immutable(self) def add(self, item): is_immutable(self) def add_header(self, item): is_immutable(self) def remove(self, item): is_immutable(self) def extend(self, *args, **kwargs): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def popitem(self): is_immutable(self) def setdefault(self, key, default): is_immutable(self) def setlistdefault(self, key, default): is_immutable(self) class EnvironHeaders(ImmutableHeadersMixin, Headers): """Read only version of the headers from a WSGI environment. This provides the same interface as `Headers` and is constructed from a WSGI environment. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __init__(self, environ): self.environ = environ def __eq__(self, other): return self.environ is other.environ __hash__ = None def __getitem__(self, key, _get_mode=False): # _get_mode is a no-op for this class as there is no index but # used because get() calls it. if not isinstance(key, string_types): raise KeyError(key) key = key.upper().replace("-", "_") if key in ("CONTENT_TYPE", "CONTENT_LENGTH"): return _unicodify_header_value(self.environ[key]) return _unicodify_header_value(self.environ["HTTP_" + key]) def __len__(self): # the iter is necessary because otherwise list calls our # len which would call list again and so forth. 
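        # ``__iter__`` yields one decoded (key, value) pair per exposed
        # header, so the length computed below matches exactly what
        # iterating over this object produces.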
return len(list(iter(self))) def __iter__(self): for key, value in iteritems(self.environ): if key.startswith("HTTP_") and key not in ( "HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH", ): yield ( key[5:].replace("_", "-").title(), _unicodify_header_value(value), ) elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value: yield (key.replace("_", "-").title(), _unicodify_header_value(value)) def copy(self): raise TypeError("cannot create %r copies" % self.__class__.__name__) @native_itermethods(["keys", "values", "items", "lists", "listvalues"]) class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` instances as sequence and it will combine the return values of all wrapped dicts: >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict >>> post = MultiDict([('foo', 'bar')]) >>> get = MultiDict([('blub', 'blah')]) >>> combined = CombinedMultiDict([get, post]) >>> combined['foo'] 'bar' >>> combined['blub'] 'blah' This works for all read operations and will raise a `TypeError` for methods that usually change data which isn't possible. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __reduce_ex__(self, protocol): return type(self), (self.dicts,) def __init__(self, dicts=None): self.dicts = dicts or [] @classmethod def fromkeys(cls): raise TypeError("cannot create %r instances by fromkeys" % cls.__name__) def __getitem__(self, key): for d in self.dicts: if key in d: return d[key] raise exceptions.BadRequestKeyError(key) def get(self, key, default=None, type=None): for d in self.dicts: if key in d: if type is not None: try: return type(d[key]) except ValueError: continue return d[key] return default def getlist(self, key, type=None): rv = [] for d in self.dicts: rv.extend(d.getlist(key, type)) return rv def _keys_impl(self): """This function exists so __len__ can be implemented more efficiently, saving one list creation from an iterator. Using this for Python 2's ``dict.keys`` behavior would be useless since `dict.keys` in Python 2 returns a list, while we have a set here. """ rv = set() for d in self.dicts: rv.update(iterkeys(d)) return rv def keys(self): return iter(self._keys_impl()) __iter__ = keys def items(self, multi=False): found = set() for d in self.dicts: for key, value in iteritems(d, multi): if multi: yield key, value elif key not in found: found.add(key) yield key, value def values(self): for _key, value in iteritems(self): yield value def lists(self): rv = {} for d in self.dicts: for key, values in iterlists(d): rv.setdefault(key, []).extend(values) return iteritems(rv) def listvalues(self): return (x[1] for x in self.lists()) def copy(self): """Return a shallow mutable copy of this object. This returns a :class:`MultiDict` representing the data at the time of copying. The copy will no longer reflect changes to the wrapped dicts. .. versionchanged:: 0.15 Return a mutable :class:`MultiDict`. """ return MultiDict(self) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first item for each key. 
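
        A hedged sketch of both modes::

            >>> cd = CombinedMultiDict([MultiDict([('a', '1'), ('a', '2')])])
            >>> cd.to_dict() == {'a': '1'}
            True
            >>> cd.to_dict(flat=False) == {'a': ['1', '2']}
            True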
:return: a :class:`dict` """ rv = {} for d in reversed(self.dicts): rv.update(d.to_dict(flat)) return rv def __len__(self): return len(self._keys_impl()) def __contains__(self, key): for d in self.dicts: if key in d: return True return False has_key = __contains__ def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.dicts) class FileMultiDict(MultiDict): """A special :class:`MultiDict` that has convenience methods to add files to it. This is used for :class:`EnvironBuilder` and generally useful for unittesting. .. versionadded:: 0.5 """ def add_file(self, name, file, filename=None, content_type=None): """Adds a new file to the dict. `file` can be a file name or a :class:`file`-like or a :class:`FileStorage` object. :param name: the name of the field. :param file: a filename or :class:`file`-like object :param filename: an optional filename :param content_type: an optional content type """ if isinstance(file, FileStorage): value = file else: if isinstance(file, string_types): if filename is None: filename = file file = open(file, "rb") if filename and content_type is None: content_type = ( mimetypes.guess_type(filename)[0] or "application/octet-stream" ) value = FileStorage(file, filename, name, content_type) self.add(name, value) class ImmutableDict(ImmutableDictMixin, dict): """An immutable :class:`dict`. .. versionadded:: 0.5 """ def __repr__(self): return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return dict(self) def __copy__(self): return self class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): """An immutable :class:`MultiDict`. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return MultiDict(self) def __copy__(self): return self class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): """An immutable :class:`OrderedMultiDict`. .. versionadded:: 0.6 """ def _iter_hashitems(self): return enumerate(iteritems(self, multi=True)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return OrderedMultiDict(self) def __copy__(self): return self @native_itermethods(["values"]) class Accept(ImmutableList): """An :class:`Accept` object is just a list subclass for lists of ``(value, quality)`` tuples. It is automatically sorted by specificity and quality. All :class:`Accept` objects work similar to a list but provide extra functionality for working with the data. Containment checks are normalized to the rules of that header: >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) >>> a.best 'ISO-8859-1' >>> 'iso-8859-1' in a True >>> 'UTF8' in a True >>> 'utf7' in a False To get the quality for an item you can use normal item lookup: >>> print a['utf-8'] 0.7 >>> a['utf7'] 0 .. versionchanged:: 0.5 :class:`Accept` objects are forced immutable now. .. versionchanged:: 1.0.0 :class:`Accept` internal values are no longer ordered alphabetically for equal quality tags. Instead the initial order is preserved. 
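
    Continuing the example above, :meth:`best_match` picks the offer the
    client prefers most (a small illustrative sketch)::

        >>> a.best_match(['utf-8', 'iso-8859-1']) == 'iso-8859-1'
        True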
""" def __init__(self, values=()): if values is None: list.__init__(self) self.provided = False elif isinstance(values, Accept): self.provided = values.provided list.__init__(self, values) else: self.provided = True values = sorted( values, key=lambda x: (self._specificity(x[0]), x[1]), reverse=True, ) list.__init__(self, values) def _specificity(self, value): """Returns a tuple describing the value's specificity.""" return (value != "*",) def _value_matches(self, value, item): """Check if a value matches a given accept item.""" return item == "*" or item.lower() == value.lower() def __getitem__(self, key): """Besides index lookup (getting item n) you can also pass it a string to get the quality for the item. If the item is not in the list, the returned quality is ``0``. """ if isinstance(key, string_types): return self.quality(key) return list.__getitem__(self, key) def quality(self, key): """Returns the quality of the key. .. versionadded:: 0.6 In previous versions you had to use the item-lookup syntax (eg: ``obj[key]`` instead of ``obj.quality(key)``) """ for item, quality in self: if self._value_matches(key, item): return quality return 0 def __contains__(self, value): for item, _quality in self: if self._value_matches(value, item): return True return False def __repr__(self): return "%s([%s])" % ( self.__class__.__name__, ", ".join("(%r, %s)" % (x, y) for x, y in self), ) def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, string_types): for idx, (item, _quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key) def find(self, key): """Get the position of an entry or return -1. :param key: The key to be looked up. """ try: return self.index(key) except ValueError: return -1 def values(self): """Iterate over all values.""" for item in self: yield item[0] def to_header(self): """Convert the header set into an HTTP header string.""" result = [] for value, quality in self: if quality != 1: value = "%s;q=%s" % (value, quality) result.append(value) return ",".join(result) def __str__(self): return self.to_header() def _best_single_match(self, match): for client_item, quality in self: if self._value_matches(match, client_item): # self is sorted by specificity descending, we can exit return client_item, quality def best_match(self, matches, default=None): """Returns the best match from a list of possible matches based on the specificity and quality of the client. If two items have the same quality and specificity, the one is returned that comes first. 
:param matches: a list of matches to check for :param default: the value that is returned if none match """ result = default best_quality = -1 best_specificity = (-1,) for server_item in matches: match = self._best_single_match(server_item) if not match: continue client_item, quality = match specificity = self._specificity(client_item) if quality <= 0 or quality < best_quality: continue # better quality or same quality but more specific => better match if quality > best_quality or specificity > best_specificity: result = server_item best_quality = quality best_specificity = specificity return result @property def best(self): """The best match as value.""" if self: return self[0][0] _mime_split_re = re.compile(r"/|(?:\s*;\s*)") def _normalize_mime(value): return _mime_split_re.split(value.lower()) class MIMEAccept(Accept): """Like :class:`Accept` but with special methods and behavior for mimetypes. """ def _specificity(self, value): return tuple(x != "*" for x in _mime_split_re.split(value)) def _value_matches(self, value, item): # item comes from the client, can't match if it's invalid. if "/" not in item: return False # value comes from the application, tell the developer when it # doesn't look valid. if "/" not in value: raise ValueError("invalid mimetype %r" % value) # Split the match value into type, subtype, and a sorted list of parameters. normalized_value = _normalize_mime(value) value_type, value_subtype = normalized_value[:2] value_params = sorted(normalized_value[2:]) # "*/*" is the only valid value that can start with "*". if value_type == "*" and value_subtype != "*": raise ValueError("invalid mimetype %r" % value) # Split the accept item into type, subtype, and parameters. normalized_item = _normalize_mime(item) item_type, item_subtype = normalized_item[:2] item_params = sorted(normalized_item[2:]) # "*/not-*" from the client is invalid, can't match. if item_type == "*" and item_subtype != "*": return False return ( (item_type == "*" and item_subtype == "*") or (value_type == "*" and value_subtype == "*") ) or ( item_type == value_type and ( item_subtype == "*" or value_subtype == "*" or (item_subtype == value_subtype and item_params == value_params) ) ) @property def accept_html(self): """True if this object accepts HTML.""" return ( "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml ) @property def accept_xhtml(self): """True if this object accepts XHTML.""" return "application/xhtml+xml" in self or "application/xml" in self @property def accept_json(self): """True if this object accepts JSON.""" return "application/json" in self _locale_delim_re = re.compile(r"[_-]") def _normalize_lang(value): """Process a language tag for matching.""" return _locale_delim_re.split(value.lower()) class LanguageAccept(Accept): """Like :class:`Accept` but with normalization for language tags.""" def _value_matches(self, value, item): return item == "*" or _normalize_lang(value) == _normalize_lang(item) def best_match(self, matches, default=None): """Given a list of supported values, finds the best match from the list of accepted values. Language tags are normalized for the purpose of matching, but are returned unchanged. If no exact match is found, this will fall back to matching the first subtag (primary language only), first with the accepted values then with the match values. This partial is not applied to any other language subtags. The default is returned if no exact or fallback match is found. :param matches: A list of supported languages to find a match. 
:param default: The value that is returned if none match. """ # Look for an exact match first. If a client accepts "en-US", # "en-US" is a valid match at this point. result = super(LanguageAccept, self).best_match(matches) if result is not None: return result # Fall back to accepting primary tags. If a client accepts # "en-US", "en" is a valid match at this point. Need to use # re.split to account for 2 or 3 letter codes. fallback = Accept( [(_locale_delim_re.split(item[0], 1)[0], item[1]) for item in self] ) result = fallback.best_match(matches) if result is not None: return result # Fall back to matching primary tags. If the client accepts # "en", "en-US" is a valid match at this point. fallback_matches = [_locale_delim_re.split(item, 1)[0] for item in matches] result = super(LanguageAccept, self).best_match(fallback_matches) # Return a value from the original match list. Find the first # original value that starts with the matched primary tag. if result is not None: return next(item for item in matches if item.startswith(result)) return default class CharsetAccept(Accept): """Like :class:`Accept` but with normalization for charsets.""" def _value_matches(self, value, item): def _normalize(name): try: return codecs.lookup(name).name except LookupError: return name.lower() return item == "*" or _normalize(value) == _normalize(item) def cache_property(key, empty, type): """Return a new property object for a cache header. Useful if you want to add support for a cache extension in a subclass.""" return property( lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), "accessor for %r" % key, ) class _CacheControl(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Cache-Control header. It has accessors for all the cache-control directives specified in RFC 2616. The class does not differentiate between request and response directives. Because the cache-control directives in the HTTP header use dashes the python descriptors use underscores for that. To get a header of the :class:`CacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionchanged:: 0.4 Setting `no_cache` or `private` to boolean `True` will set the implicit none-value which is ``*``: >>> cc = ResponseCacheControl() >>> cc.no_cache = True >>> cc <ResponseCacheControl 'no-cache'> >>> cc.no_cache '*' >>> cc.no_cache = None >>> cc <ResponseCacheControl ''> In versions before 0.5 the behavior documented here affected the now no longer existing `CacheControl` class. 
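
    A short illustrative sketch of header generation::

        >>> cc = ResponseCacheControl()
        >>> cc.max_age = 3600
        >>> cc.to_header()
        'max-age=3600'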
""" no_cache = cache_property("no-cache", "*", None) no_store = cache_property("no-store", None, bool) max_age = cache_property("max-age", -1, int) no_transform = cache_property("no-transform", None, None) def __init__(self, values=(), on_update=None): dict.__init__(self, values or ()) self.on_update = on_update self.provided = values is not None def _get_cache_value(self, key, empty, type): """Used internally by the accessor properties.""" if type is bool: return key in self if key in self: value = self[key] if value is None: return empty elif type is not None: try: value = type(value) except ValueError: pass return value def _set_cache_value(self, key, value, type): """Used internally by the accessor properties.""" if type is bool: if value: self[key] = None else: self.pop(key, None) else: if value is None: self.pop(key, None) elif value is True: self[key] = None else: self[key] = value def _del_cache_value(self, key): """Used internally by the accessor properties.""" if key in self: del self[key] def to_header(self): """Convert the stored values into a cache control header.""" return dump_header(self) def __str__(self): return self.to_header() def __repr__(self): return "<%s %s>" % ( self.__class__.__name__, " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())), ) class RequestCacheControl(ImmutableDictMixin, _CacheControl): """A cache control for requests. This is immutable and gives access to all the request-relevant cache control headers. To get a header of the :class:`RequestCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ max_stale = cache_property("max-stale", "*", int) min_fresh = cache_property("min-fresh", "*", int) only_if_cached = cache_property("only-if-cached", None, bool) class ResponseCacheControl(_CacheControl): """A cache control for responses. Unlike :class:`RequestCacheControl` this is mutable and gives access to response-relevant cache control headers. To get a header of the :class:`ResponseCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ public = cache_property("public", None, bool) private = cache_property("private", "*", None) must_revalidate = cache_property("must-revalidate", None, bool) proxy_revalidate = cache_property("proxy-revalidate", None, bool) s_maxage = cache_property("s-maxage", None, None) immutable = cache_property("immutable", None, bool) # attach cache_property to the _CacheControl as staticmethod # so that others can reuse it. _CacheControl.cache_property = staticmethod(cache_property) def csp_property(key): """Return a new property object for a content security policy header. Useful if you want to add support for a csp extension in a subclass. """ return property( lambda x: x._get_value(key), lambda x, v: x._set_value(key, v), lambda x: x._del_value(key), "accessor for %r" % key, ) class ContentSecurityPolicy(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Content Security Policy header. It has accessors for all the level 3 policies. 
    Because the CSP directives in the HTTP header use dashes, the Python
    descriptors use underscores for that.

    To get a header of the :class:`ContentSecurityPolicy` object again
    you can convert the object into a string or call the
    :meth:`to_header` method.  If you plan to subclass it and add your
    own items have a look at the sourcecode for that class.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    """

    base_uri = csp_property("base-uri")
    child_src = csp_property("child-src")
    connect_src = csp_property("connect-src")
    default_src = csp_property("default-src")
    font_src = csp_property("font-src")
    form_action = csp_property("form-action")
    frame_ancestors = csp_property("frame-ancestors")
    frame_src = csp_property("frame-src")
    img_src = csp_property("img-src")
    manifest_src = csp_property("manifest-src")
    media_src = csp_property("media-src")
    navigate_to = csp_property("navigate-to")
    object_src = csp_property("object-src")
    prefetch_src = csp_property("prefetch-src")
    plugin_types = csp_property("plugin-types")
    report_to = csp_property("report-to")
    report_uri = csp_property("report-uri")
    sandbox = csp_property("sandbox")
    script_src = csp_property("script-src")
    script_src_attr = csp_property("script-src-attr")
    script_src_elem = csp_property("script-src-elem")
    style_src = csp_property("style-src")
    style_src_attr = csp_property("style-src-attr")
    style_src_elem = csp_property("style-src-elem")
    worker_src = csp_property("worker-src")

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        self.provided = values is not None

    def _get_value(self, key):
        """Used internally by the accessor properties."""
        return self.get(key)

    def _set_value(self, key, value):
        """Used internally by the accessor properties."""
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value

    def _del_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a Content Security Policy header."""
        return dump_csp_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())),
        )


class CallbackDict(UpdateDictMixin, dict):
    """A dict that calls a function passed every time something is changed.
    The function is passed the dict instance.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        self.on_update = on_update

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, dict.__repr__(self))


class HeaderSet(collections_abc.MutableSet):
    """Similar to the :class:`ETags` class this implements a set-like
    structure.  Unlike :class:`ETags` this is case insensitive and used for
    vary, allow, and content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises a :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions an :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
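
        A tiny sketch (note the case-insensitive match)::

            >>> hs = HeaderSet(['foo', 'bar'])
            >>> hs.remove('Foo')
            >>> hs
            HeaderSet(['bar'])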
""" key = header.lower() if key not in self._set: raise KeyError(header) self._set.remove(key) for idx, key in enumerate(self._headers): if key.lower() == header: del self._headers[idx] break if self.on_update is not None: self.on_update(self) def update(self, iterable): """Add all the headers from the iterable to the set. :param iterable: updates the set with the items from the iterable. """ inserted_any = False for header in iterable: key = header.lower() if key not in self._set: self._headers.append(header) self._set.add(key) inserted_any = True if inserted_any and self.on_update is not None: self.on_update(self) def discard(self, header): """Like :meth:`remove` but ignores errors. :param header: the header to be discarded. """ try: return self.remove(header) except KeyError: pass def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1 def index(self, header): """Return the index of the header in the set or raise an :exc:`IndexError`. :param header: the header to be looked up. """ rv = self.find(header) if rv < 0: raise IndexError(header) return rv def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self) def as_set(self, preserve_casing=False): """Return the set as real python set type. When calling this, all the items are converted to lowercase and the ordering is lost. :param preserve_casing: if set to `True` the items in the set returned will have the original case like in the :class:`HeaderSet`, otherwise they will be lowercase. """ if preserve_casing: return set(self._headers) return set(self._set) def to_header(self): """Convert the header set into an HTTP header string.""" return ", ".join(map(quote_header_value, self._headers)) def __getitem__(self, idx): return self._headers[idx] def __delitem__(self, idx): rv = self._headers.pop(idx) self._set.remove(rv.lower()) if self.on_update is not None: self.on_update(self) def __setitem__(self, idx, value): old = self._headers[idx] self._set.remove(old.lower()) self._headers[idx] = value self._set.add(value.lower()) if self.on_update is not None: self.on_update(self) def __contains__(self, header): return header.lower() in self._set def __len__(self): return len(self._set) def __iter__(self): return iter(self._headers) def __nonzero__(self): return bool(self._set) def __str__(self): return self.to_header() def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._headers) class ETags(collections_abc.Container, collections_abc.Iterable): """A set that can be used to check if one etag is present in a collection of etags. """ def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): self._strong = frozenset(not star_tag and strong_etags or ()) self._weak = frozenset(weak_etags or ()) self.star_tag = star_tag def as_set(self, include_weak=False): """Convert the `ETags` object into a python set. 
        By default the weak etags are not part of this set."""
        rv = set(self._strong)
        if include_weak:
            rv.update(self._weak)
        return rv

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def is_strong(self, etag):
        """Check if an etag is strong."""
        return etag in self._strong

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        if self.star_tag:
            return True
        return self.is_strong(etag)

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        if weak:
            return self.contains_weak(etag)
        return self.contains(etag)

    def to_header(self):
        """Convert the etags set into an HTTP header string."""
        if self.star_tag:
            return "*"
        return ", ".join(
            ['"%s"' % x for x in self._strong] + ['W/"%s"' % x for x in self._weak]
        )

    def __call__(self, etag=None, data=None, include_weak=False):
        if [etag, data].count(None) != 1:
            raise TypeError("exactly one of etag or data must be provided")
        if etag is None:
            etag = generate_etag(data)
        if include_weak:
            if etag in self._weak:
                return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    __nonzero__ = __bool__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, str(self))


class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It will have either an etag or a date, or neither of them, but
    never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted.  Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        if self.date is not None:
            return http_date(self.date)
        if self.etag is not None:
            return quote_etag(self.etag)
        return ""

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, str(self))


class Range(object):
    """Represents a ``Range`` header.  All methods support only bytes as the
    unit.  Stores a list of ranges if given, but the methods only work if
    exactly one range is provided.

    :raise ValueError: If the ranges provided are invalid.

    .. versionchanged:: 0.15
        The ranges passed in are validated.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range.  Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

        for start, end in ranges:
            if start is None or (end is not None and (start < 0 or start >= end)):
                raise ValueError("{} is not a valid range.".format((start, end)))

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None, and there is
        exactly one range and it is satisfiable, it returns a ``(start, stop)``
        tuple, otherwise `None`.
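
        A couple of illustrative sketches::

            >>> Range("bytes", [(0, 500)]).range_for_length(1000)
            (0, 500)
            >>> Range("bytes", [(-500, None)]).range_for_length(1000)
            (500, 1000)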
""" if self.units != "bytes" or length is None or len(self.ranges) != 1: return None start, end = self.ranges[0] if end is None: end = length if start < 0: start += length if is_byte_range_valid(start, end, length): return start, min(end, length) def make_content_range(self, length): """Creates a :class:`~werkzeug.datastructures.ContentRange` object from the current range and given content length. """ rng = self.range_for_length(length) if rng is not None: return ContentRange(self.units, rng[0], rng[1], length) def to_header(self): """Converts the object back into an HTTP header.""" ranges = [] for begin, end in self.ranges: if end is None: ranges.append("%s-" % begin if begin >= 0 else str(begin)) else: ranges.append("%s-%s" % (begin, end - 1)) return "%s=%s" % (self.units, ",".join(ranges)) def to_content_range_header(self, length): """Converts the object into `Content-Range` HTTP header, based on given length """ range_for_length = self.range_for_length(length) if range_for_length is not None: return "%s %d-%d/%d" % ( self.units, range_for_length[0], range_for_length[1] - 1, length, ) return None def __str__(self): return self.to_header() def __repr__(self): return "<%s %r>" % (self.__class__.__name__, str(self)) class ContentRange(object): """Represents the content range header. .. versionadded:: 0.7 """ def __init__(self, units, start, stop, length=None, on_update=None): assert is_byte_range_valid(start, stop, length), "Bad range provided" self.on_update = on_update self.set(start, stop, length, units) def _callback_property(name): # noqa: B902 def fget(self): return getattr(self, name) def fset(self, value): setattr(self, name, value) if self.on_update is not None: self.on_update(self) return property(fget, fset) #: The units to use, usually "bytes" units = _callback_property("_units") #: The start point of the range or `None`. start = _callback_property("_start") #: The stop point of the range (non-inclusive) or `None`. Can only be #: `None` if also start is `None`. stop = _callback_property("_stop") #: The length of the range or `None`. length = _callback_property("_length") del _callback_property def set(self, start, stop, length=None, units="bytes"): """Simple method to update the ranges.""" assert is_byte_range_valid(start, stop, length), "Bad range provided" self._units = units self._start = start self._stop = stop self._length = length if self.on_update is not None: self.on_update(self) def unset(self): """Sets the units to `None` which indicates that the header should no longer be used. """ self.set(None, None, units=None) def to_header(self): if self.units is None: return "" if self.length is None: length = "*" else: length = self.length if self.start is None: return "%s */%s" % (self.units, length) return "%s %s-%s/%s" % (self.units, self.start, self.stop - 1, length) def __nonzero__(self): return self.units is not None __bool__ = __nonzero__ def __str__(self): return self.to_header() def __repr__(self): return "<%s %r>" % (self.__class__.__name__, str(self)) class Authorization(ImmutableDictMixin, dict): """Represents an `Authorization` header sent by the client. You should not create this kind of object yourself but use it when it's returned by the `parse_authorization_header` function. This object is a dict subclass and can be altered by setting dict items but it should be considered immutable as it's returned by the client and not meant for modifications. .. versionchanged:: 0.5 This object became immutable. 
""" def __init__(self, auth_type, data=None): dict.__init__(self, data or {}) self.type = auth_type @property def username(self): """The username transmitted. This is set for both basic and digest auth all the time. """ return self.get("username") @property def password(self): """When the authentication type is basic this is the password transmitted by the client, else `None`. """ return self.get("password") @property def realm(self): """This is the server realm sent back for HTTP digest auth.""" return self.get("realm") @property def nonce(self): """The nonce the server sent for digest auth, sent back by the client. A nonce should be unique for every 401 response for HTTP digest auth. """ return self.get("nonce") @property def uri(self): """The URI from Request-URI of the Request-Line; duplicated because proxies are allowed to change the Request-Line in transit. HTTP digest auth only. """ return self.get("uri") @property def nc(self): """The nonce count value transmitted by clients if a qop-header is also transmitted. HTTP digest auth only. """ return self.get("nc") @property def cnonce(self): """If the server sent a qop-header in the ``WWW-Authenticate`` header, the client has to provide this value for HTTP digest auth. See the RFC for more details. """ return self.get("cnonce") @property def response(self): """A string of 32 hex digits computed as defined in RFC 2617, which proves that the user knows a password. Digest auth only. """ return self.get("response") @property def opaque(self): """The opaque header from the server returned unchanged by the client. It is recommended that this string be base64 or hexadecimal data. Digest auth only. """ return self.get("opaque") @property def qop(self): """Indicates what "quality of protection" the client has applied to the message for HTTP digest auth. Note that this is a single token, not a quoted list of alternatives as in WWW-Authenticate. 
""" return self.get("qop") class WWWAuthenticate(UpdateDictMixin, dict): """Provides simple access to `WWW-Authenticate` headers.""" #: list of keys that require quoting in the generated header _require_quoting = frozenset(["domain", "nonce", "opaque", "realm", "qop"]) def __init__(self, auth_type=None, values=None, on_update=None): dict.__init__(self, values or ()) if auth_type: self["__auth_type__"] = auth_type self.on_update = on_update def set_basic(self, realm="authentication required"): """Clear the auth info and enable basic auth.""" dict.clear(self) dict.update(self, {"__auth_type__": "basic", "realm": realm}) if self.on_update: self.on_update(self) def set_digest( self, realm, nonce, qop=("auth",), opaque=None, algorithm=None, stale=False ): """Clear the auth info and enable digest auth.""" d = { "__auth_type__": "digest", "realm": realm, "nonce": nonce, "qop": dump_header(qop), } if stale: d["stale"] = "TRUE" if opaque is not None: d["opaque"] = opaque if algorithm is not None: d["algorithm"] = algorithm dict.clear(self) dict.update(self, d) if self.on_update: self.on_update(self) def to_header(self): """Convert the stored values into a WWW-Authenticate header.""" d = dict(self) auth_type = d.pop("__auth_type__", None) or "basic" return "%s %s" % ( auth_type.title(), ", ".join( [ "%s=%s" % ( key, quote_header_value( value, allow_token=key not in self._require_quoting ), ) for key, value in iteritems(d) ] ), ) def __str__(self): return self.to_header() def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.to_header()) def auth_property(name, doc=None): # noqa: B902 """A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented. """ def _set_value(self, value): if value is None: self.pop(name, None) else: self[name] = str(value) return property(lambda x: x.get(name), _set_value, doc=doc) def _set_property(name, doc=None): # noqa: B902 def fget(self): def on_update(header_set): if not header_set and name in self: del self[name] elif header_set: self[name] = header_set.to_header() return parse_set_header(self.get(name), on_update) return property(fget, doc=doc) type = auth_property( "__auth_type__", doc="""The type of the auth mechanism. HTTP currently specifies ``Basic`` and ``Digest``.""", ) realm = auth_property( "realm", doc="""A string to be displayed to users so they know which username and password to use. This string should contain at least the name of the host performing the authentication and might additionally indicate the collection of users who might have access.""", ) domain = _set_property( "domain", doc="""A list of URIs that define the protection space. If a URI is an absolute path, it is relative to the canonical root URL of the server being accessed.""", ) nonce = auth_property( "nonce", doc=""" A server-specified data string which should be uniquely generated each time a 401 response is made. It is recommended that this string be base64 or hexadecimal data.""", ) opaque = auth_property( "opaque", doc="""A string of data, specified by the server, which should be returned by the client unchanged in the Authorization header of subsequent requests with URIs in the same protection space. 
It is recommended that this string be base64 or hexadecimal data.""", ) algorithm = auth_property( "algorithm", doc="""A string indicating a pair of algorithms used to produce the digest and a checksum. If this is not present it is assumed to be "MD5". If the algorithm is not understood, the challenge should be ignored (and a different one used, if there is more than one).""", ) qop = _set_property( "qop", doc="""A set of quality-of-privacy directives such as auth and auth-int.""", ) @property def stale(self): """A flag, indicating that the previous request from the client was rejected because the nonce value was stale. """ val = self.get("stale") if val is not None: return val.lower() == "true" @stale.setter def stale(self, value): if value is None: self.pop("stale", None) else: self["stale"] = "TRUE" if value else "FALSE" auth_property = staticmethod(auth_property) del _set_property class FileStorage(object): """The :class:`FileStorage` class is a thin wrapper over incoming files. It is used by the request object to represent uploaded files. All the attributes of the wrapper stream are proxied by the file storage so it's possible to do ``storage.read()`` instead of the long form ``storage.stream.read()``. """ def __init__( self, stream=None, filename=None, name=None, content_type=None, content_length=None, headers=None, ): self.name = name self.stream = stream or BytesIO() # if no filename is provided we can attempt to get the filename # from the stream object passed. There we have to be careful to # skip things like <fdopen>, <stderr> etc. Python marks these # special filenames with angular brackets. if filename is None: filename = getattr(stream, "name", None) s = make_literal_wrapper(filename) if filename and filename[0] == s("<") and filename[-1] == s(">"): filename = None # On Python 3 we want to make sure the filename is always unicode. # This might not be if the name attribute is bytes due to the # file being opened from the bytes API. if not PY2 and isinstance(filename, bytes): filename = filename.decode(get_filesystem_encoding(), "replace") self.filename = filename if headers is None: headers = Headers() self.headers = headers if content_type is not None: headers["Content-Type"] = content_type if content_length is not None: headers["Content-Length"] = str(content_length) def _parse_content_type(self): if not hasattr(self, "_parsed_content_type"): self._parsed_content_type = parse_options_header(self.content_type) @property def content_type(self): """The content-type sent in the header. Usually not available""" return self.headers.get("content-type") @property def content_length(self): """The content-length sent in the header. Usually not available""" return int(self.headers.get("content-length") or 0) @property def mimetype(self): """Like :attr:`content_type`, but without parameters (eg, without charset, type etc.) and always lowercase. For example if the content type is ``text/HTML; charset=utf-8`` the mimetype would be ``'text/html'``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[0].lower() @property def mimetype_params(self): """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[1] def save(self, dst, buffer_size=16384): """Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. 
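
        A minimal usage sketch (stream and destination are illustrative)::

            >>> from io import BytesIO
            >>> fs = FileStorage(BytesIO(b'contents'), filename='report.txt')
            >>> fs.save('/tmp/report.txt')  # doctest: +SKIP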
The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename, :class:`os.PathLike`, or open file object to write to. :param buffer_size: Passed as the ``length`` parameter of :func:`shutil.copyfileobj`. .. versionchanged:: 1.0 Supports :mod:`pathlib`. """ from shutil import copyfileobj close_dst = False if hasattr(dst, "__fspath__"): dst = fspath(dst) if isinstance(dst, string_types): dst = open(dst, "wb") close_dst = True try: copyfileobj(self.stream, dst, buffer_size) finally: if close_dst: dst.close() def close(self): """Close the underlying file if possible.""" try: self.stream.close() except Exception: pass def __nonzero__(self): return bool(self.filename) __bool__ = __nonzero__ def __getattr__(self, name): try: return getattr(self.stream, name) except AttributeError: # SpooledTemporaryFile doesn't implement IOBase, get the # attribute from its backing file instead. # https://github.com/python/cpython/pull/3249 if hasattr(self.stream, "_file"): return getattr(self.stream._file, name) raise def __iter__(self): return iter(self.stream) def __repr__(self): return "<%s: %r (%r)>" % ( self.__class__.__name__, self.filename, self.content_type, ) # circular dependencies from .http import dump_csp_header from .http import dump_header from .http import dump_options_header from .http import generate_etag from .http import http_date from .http import is_byte_range_valid from .http import parse_options_header from .http import parse_set_header from .http import quote_etag from .http import quote_header_value from .http import unquote_etag
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/urls.py
# -*- coding: utf-8 -*- """ werkzeug.urls ~~~~~~~~~~~~~ ``werkzeug.urls`` used to provide several wrapper functions for Python 2 urlparse, whose main purpose were to work around the behavior of the Py2 stdlib and its lack of unicode support. While this was already a somewhat inconvenient situation, it got even more complicated because Python 3's ``urllib.parse`` actually does handle unicode properly. In other words, this module would wrap two libraries with completely different behavior. So now this module contains a 2-and-3-compatible backport of Python 3's ``urllib.parse``, which is mostly API-compatible. :copyright: 2007 Pallets :license: BSD-3-Clause """ import codecs import os import re from collections import namedtuple from ._compat import fix_tuple_repr from ._compat import implements_to_string from ._compat import make_literal_wrapper from ._compat import normalize_string_tuple from ._compat import PY2 from ._compat import text_type from ._compat import to_native from ._compat import to_unicode from ._compat import try_coerce_native from ._internal import _decode_idna from ._internal import _encode_idna # A regular expression for what a valid schema looks like _scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$") # Characters that are safe in any part of an URL. _always_safe = frozenset( bytearray( b"abcdefghijklmnopqrstuvwxyz" b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" b"0123456789" b"-._~" ) ) _hexdigits = "0123456789ABCDEFabcdef" _hextobyte = dict( ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits ) _bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)] _URLTuple = fix_tuple_repr( namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"]) ) class BaseURL(_URLTuple): """Superclass of :py:class:`URL` and :py:class:`BytesURL`.""" __slots__ = () def replace(self, **kwargs): """Return an URL with the same values, except for those parameters given new values by whichever keyword arguments are specified.""" return self._replace(**kwargs) @property def host(self): """The host part of the URL if available, otherwise `None`. The host is either the hostname or the IP address mentioned in the URL. It will not contain the port. """ return self._split_host()[0] @property def ascii_host(self): """Works exactly like :attr:`host` but will return a result that is restricted to ASCII. If it finds a netloc that is not ASCII it will attempt to idna decode it. This is useful for socket operations when the URL might include internationalized characters. """ rv = self.host if rv is not None and isinstance(rv, text_type): try: rv = _encode_idna(rv) except UnicodeError: rv = rv.encode("ascii", "ignore") return to_native(rv, "ascii", "ignore") @property def port(self): """The port in the URL as an integer if it was present, `None` otherwise. This does not fill in default ports. """ try: rv = int(to_native(self._split_host()[1])) if 0 <= rv <= 65535: return rv except (ValueError, TypeError): pass @property def auth(self): """The authentication part in the URL if available, `None` otherwise. """ return self._split_netloc()[0] @property def username(self): """The username if it was part of the URL, `None` otherwise. This undergoes URL decoding and will always be a unicode string. """ rv = self._split_auth()[0] if rv is not None: return _url_unquote_legacy(rv) @property def raw_username(self): """The username if it was part of the URL, `None` otherwise. Unlike :attr:`username` this one is not being decoded. 
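
        For example::

            >>> url = url_parse("http://user%20name:pw@example.com/")
            >>> url.raw_username == "user%20name"
            True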
""" return self._split_auth()[0] @property def password(self): """The password if it was part of the URL, `None` otherwise. This undergoes URL decoding and will always be a unicode string. """ rv = self._split_auth()[1] if rv is not None: return _url_unquote_legacy(rv) @property def raw_password(self): """The password if it was part of the URL, `None` otherwise. Unlike :attr:`password` this one is not being decoded. """ return self._split_auth()[1] def decode_query(self, *args, **kwargs): """Decodes the query part of the URL. Ths is a shortcut for calling :func:`url_decode` on the query argument. The arguments and keyword arguments are forwarded to :func:`url_decode` unchanged. """ return url_decode(self.query, *args, **kwargs) def join(self, *args, **kwargs): """Joins this URL with another one. This is just a convenience function for calling into :meth:`url_join` and then parsing the return value again. """ return url_parse(url_join(self, *args, **kwargs)) def to_url(self): """Returns a URL string or bytes depending on the type of the information stored. This is just a convenience function for calling :meth:`url_unparse` for this URL. """ return url_unparse(self) def decode_netloc(self): """Decodes the netloc part into a string.""" rv = _decode_idna(self.host or "") if ":" in rv: rv = "[%s]" % rv port = self.port if port is not None: rv = "%s:%d" % (rv, port) auth = ":".join( filter( None, [ _url_unquote_legacy(self.raw_username or "", "/:%@"), _url_unquote_legacy(self.raw_password or "", "/:%@"), ], ) ) if auth: rv = "%s@%s" % (auth, rv) return rv def to_uri_tuple(self): """Returns a :class:`BytesURL` tuple that holds a URI. This will encode all the information in the URL properly to ASCII using the rules a web browser would follow. It's usually more interesting to directly call :meth:`iri_to_uri` which will return a string. """ return url_parse(iri_to_uri(self).encode("ascii")) def to_iri_tuple(self): """Returns a :class:`URL` tuple that holds a IRI. This will try to decode as much information as possible in the URL without losing information similar to how a web browser does it for the URL bar. It's usually more interesting to directly call :meth:`uri_to_iri` which will return a string. """ return url_parse(uri_to_iri(self)) def get_file_location(self, pathformat=None): """Returns a tuple with the location of the file in the form ``(server, location)``. If the netloc is empty in the URL or points to localhost, it's represented as ``None``. The `pathformat` by default is autodetection but needs to be set when working with URLs of a specific system. The supported values are ``'windows'`` when working with Windows or DOS paths and ``'posix'`` when working with posix paths. If the URL does not point to a local file, the server and location are both represented as ``None``. :param pathformat: The expected format of the path component. Currently ``'windows'`` and ``'posix'`` are supported. Defaults to ``None`` which is autodetect. """ if self.scheme != "file": return None, None path = url_unquote(self.path) host = self.netloc or None if pathformat is None: if os.name == "nt": pathformat = "windows" else: pathformat = "posix" if pathformat == "windows": if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:": path = path[1:2] + ":" + path[3:] windows_share = path[:3] in ("\\" * 3, "/" * 3) import ntpath path = ntpath.normpath(path) # Windows shared drives are represented as ``\\host\\directory``. 
# That results in a URL like ``file://///host/directory``, and a # path like ``///host/directory``. We need to special-case this # because the path contains the hostname. if windows_share and host is None: parts = path.lstrip("\\").split("\\", 1) if len(parts) == 2: host, path = parts else: host = parts[0] path = "" elif pathformat == "posix": import posixpath path = posixpath.normpath(path) else: raise TypeError("Invalid path format %s" % repr(pathformat)) if host in ("127.0.0.1", "::1", "localhost"): host = None return host, path def _split_netloc(self): if self._at in self.netloc: return self.netloc.split(self._at, 1) return None, self.netloc def _split_auth(self): auth = self._split_netloc()[0] if not auth: return None, None if self._colon not in auth: return auth, None return auth.split(self._colon, 1) def _split_host(self): rv = self._split_netloc()[1] if not rv: return None, None if not rv.startswith(self._lbracket): if self._colon in rv: return rv.split(self._colon, 1) return rv, None idx = rv.find(self._rbracket) if idx < 0: return rv, None host = rv[1:idx] rest = rv[idx + 1 :] if rest.startswith(self._colon): return host, rest[1:] return host, None @implements_to_string class URL(BaseURL): """Represents a parsed URL. This behaves like a regular tuple but also has some extra attributes that give further insight into the URL. """ __slots__ = () _at = "@" _colon = ":" _lbracket = "[" _rbracket = "]" def __str__(self): return self.to_url() def encode_netloc(self): """Encodes the netloc part to an ASCII safe URL as bytes.""" rv = self.ascii_host or "" if ":" in rv: rv = "[%s]" % rv port = self.port if port is not None: rv = "%s:%d" % (rv, port) auth = ":".join( filter( None, [ url_quote(self.raw_username or "", "utf-8", "strict", "/:%"), url_quote(self.raw_password or "", "utf-8", "strict", "/:%"), ], ) ) if auth: rv = "%s@%s" % (auth, rv) return to_native(rv) def encode(self, charset="utf-8", errors="replace"): """Encodes the URL to a tuple made out of bytes. The charset is only being used for the path, query and fragment. """ return BytesURL( self.scheme.encode("ascii"), self.encode_netloc(), self.path.encode(charset, errors), self.query.encode(charset, errors), self.fragment.encode(charset, errors), ) class BytesURL(BaseURL): """Represents a parsed URL in bytes.""" __slots__ = () _at = b"@" _colon = b":" _lbracket = b"[" _rbracket = b"]" def __str__(self): return self.to_url().decode("utf-8", "replace") def encode_netloc(self): """Returns the netloc unchanged as bytes.""" return self.netloc def decode(self, charset="utf-8", errors="replace"): """Decodes the URL to a tuple made out of strings. The charset is only being used for the path, query and fragment. 
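
        A small sketch::

            >>> url_parse(b"http://example.com/path").decode().path == u"/path"
            True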
""" return URL( self.scheme.decode("ascii"), self.decode_netloc(), self.path.decode(charset, errors), self.query.decode(charset, errors), self.fragment.decode(charset, errors), ) _unquote_maps = {frozenset(): _hextobyte} def _unquote_to_bytes(string, unsafe=""): if isinstance(string, text_type): string = string.encode("utf-8") if isinstance(unsafe, text_type): unsafe = unsafe.encode("utf-8") unsafe = frozenset(bytearray(unsafe)) groups = iter(string.split(b"%")) result = bytearray(next(groups, b"")) try: hex_to_byte = _unquote_maps[unsafe] except KeyError: hex_to_byte = _unquote_maps[unsafe] = { h: b for h, b in _hextobyte.items() if b not in unsafe } for group in groups: code = group[:2] if code in hex_to_byte: result.append(hex_to_byte[code]) result.extend(group[2:]) else: result.append(37) # % result.extend(group) return bytes(result) def _url_encode_impl(obj, charset, encode_keys, sort, key): from .datastructures import iter_multi_items iterable = iter_multi_items(obj) if sort: iterable = sorted(iterable, key=key) for key, value in iterable: if value is None: continue if not isinstance(key, bytes): key = text_type(key).encode(charset) if not isinstance(value, bytes): value = text_type(value).encode(charset) yield _fast_url_quote_plus(key) + "=" + _fast_url_quote_plus(value) def _url_unquote_legacy(value, unsafe=""): try: return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe) except UnicodeError: return url_unquote(value, charset="latin1", unsafe=unsafe) def url_parse(url, scheme=None, allow_fragments=True): """Parses a URL from a string into a :class:`URL` tuple. If the URL is lacking a scheme it can be provided as second argument. Otherwise, it is ignored. Optionally fragments can be stripped from the URL by setting `allow_fragments` to `False`. The inverse of this function is :func:`url_unparse`. :param url: the URL to parse. :param scheme: the default schema to use if the URL is schemaless. :param allow_fragments: if set to `False` a fragment will be removed from the URL. """ s = make_literal_wrapper(url) is_text_based = isinstance(url, text_type) if scheme is None: scheme = s("") netloc = query = fragment = s("") i = url.find(s(":")) if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")): # make sure "iri" is not actually a port number (in which case # "scheme" is really part of the path) rest = url[i + 1 :] if not rest or any(c not in s("0123456789") for c in rest): # not a port number scheme, url = url[:i].lower(), rest if url[:2] == s("//"): delim = len(url) for c in s("/?#"): wdelim = url.find(c, 2) if wdelim >= 0: delim = min(delim, wdelim) netloc, url = url[2:delim], url[delim:] if (s("[") in netloc and s("]") not in netloc) or ( s("]") in netloc and s("[") not in netloc ): raise ValueError("Invalid IPv6 URL") if allow_fragments and s("#") in url: url, fragment = url.split(s("#"), 1) if s("?") in url: url, query = url.split(s("?"), 1) result_type = URL if is_text_based else BytesURL return result_type(scheme, netloc, url, query, fragment) def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""): """Precompile the translation table for a URL encoding function. Unlike :func:`url_quote`, the generated function only takes the string to quote. :param charset: The charset to encode the result with. :param errors: How to handle encoding errors. :param safe: An optional sequence of safe characters to never encode. :param unsafe: An optional sequence of unsafe characters to always encode. 
""" if isinstance(safe, text_type): safe = safe.encode(charset, errors) if isinstance(unsafe, text_type): unsafe = unsafe.encode(charset, errors) safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe)) table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)] if not PY2: def quote(string): return "".join([table[c] for c in string]) else: def quote(string): return "".join([table[c] for c in bytearray(string)]) return quote _fast_url_quote = _make_fast_url_quote() _fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+") def _fast_url_quote_plus(string): return _fast_quote_plus(string).replace(" ", "+") def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""): """URL encode a single string with a given encoding. :param s: the string to quote. :param charset: the charset to be used. :param safe: an optional sequence of safe characters. :param unsafe: an optional sequence of unsafe characters. .. versionadded:: 0.9.2 The `unsafe` parameter was added. """ if not isinstance(string, (text_type, bytes, bytearray)): string = text_type(string) if isinstance(string, text_type): string = string.encode(charset, errors) if isinstance(safe, text_type): safe = safe.encode(charset, errors) if isinstance(unsafe, text_type): unsafe = unsafe.encode(charset, errors) safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe)) rv = bytearray() for char in bytearray(string): if char in safe: rv.append(char) else: rv.extend(_bytetohex[char]) return to_native(bytes(rv)) def url_quote_plus(string, charset="utf-8", errors="strict", safe=""): """URL encode a single string with the given encoding and convert whitespace to "+". :param s: The string to quote. :param charset: The charset to be used. :param safe: An optional sequence of safe characters. """ return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+") def url_unparse(components): """The reverse operation to :meth:`url_parse`. This accepts arbitrary as well as :class:`URL` tuples and returns a URL as a string. :param components: the parsed URL as tuple which should be converted into a URL string. """ scheme, netloc, path, query, fragment = normalize_string_tuple(components) s = make_literal_wrapper(scheme) url = s("") # We generally treat file:///x and file:/x the same which is also # what browsers seem to do. This also allows us to ignore a schema # register for netloc utilization or having to differentiate between # empty and missing netloc. if netloc or (scheme and path.startswith(s("/"))): if path and path[:1] != s("/"): path = s("/") + path url = s("//") + (netloc or s("")) + path elif path: url += path if scheme: url = scheme + s(":") + url if query: url = url + s("?") + query if fragment: url = url + s("#") + fragment return url def url_unquote(string, charset="utf-8", errors="replace", unsafe=""): """URL decode a single string with a given encoding. If the charset is set to `None` no unicode decoding is performed and raw bytes are returned. :param s: the string to unquote. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param errors: the error handling for the charset decoding. """ rv = _unquote_to_bytes(string, unsafe) if charset is not None: rv = rv.decode(charset, errors) return rv def url_unquote_plus(s, charset="utf-8", errors="replace"): """URL decode a single string with the given `charset` and decode "+" to whitespace. Per default encoding errors are ignored. 
If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. :param s: The string to unquote. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param errors: The error handling for the `charset` decoding. """ if isinstance(s, text_type): s = s.replace(u"+", u" ") else: s = s.replace(b"+", b" ") return url_unquote(s, charset, errors) def url_fix(s, charset="utf-8"): r"""Sometimes you get an URL by a user that just isn't a real URL because it contains unsafe characters like ' ' and so on. This function can fix some of the problems in a similar way browsers handle data entered by the user: >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)') 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)' :param s: the string with the URL to fix. :param charset: The target charset for the URL if the url was given as unicode string. """ # First step is to switch to unicode processing and to convert # backslashes (which are invalid in URLs anyways) to slashes. This is # consistent with what Chrome does. s = to_unicode(s, charset, "replace").replace("\\", "/") # For the specific case that we look like a malformed windows URL # we want to fix this up manually: if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"): s = "file:///" + s[7:] url = url_parse(s) path = url_quote(url.path, charset, safe="/%+$!*'(),") qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),") anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),") return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))) # not-unreserved characters remain quoted when unquoting to IRI _to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe]) def _codec_error_url_quote(e): """Used in :func:`uri_to_iri` after unquoting to re-quote any invalid bytes. """ out = _fast_url_quote(e.object[e.start : e.end]) if PY2: out = out.decode("utf-8") return out, e.end codecs.register_error("werkzeug.url_quote", _codec_error_url_quote) def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"): """Convert a URI to an IRI. All valid UTF-8 characters are unquoted, leaving all reserved and invalid characters quoted. If the URL has a domain, it is decoded from Punycode. >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF") 'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF' :param uri: The URI to convert. :param charset: The encoding to encode unquoted bytes with. :param errors: Error handler to use during ``bytes.encode``. By default, invalid bytes are left quoted. .. versionchanged:: 0.15 All reserved and invalid characters remain quoted. Previously, only some reserved characters were preserved, and invalid bytes were replaced instead of left quoted. .. versionadded:: 0.6 """ if isinstance(uri, tuple): uri = url_unparse(uri) uri = url_parse(to_unicode(uri, charset)) path = url_unquote(uri.path, charset, errors, _to_iri_unsafe) query = url_unquote(uri.query, charset, errors, _to_iri_unsafe) fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe) return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment)) # reserved characters remain unquoted when quoting to URI _to_uri_safe = ":/?#[]@!$&'()*+,;=%" def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False): """Convert an IRI to a URI. All non-ASCII and unsafe characters are quoted. If the URL has a domain, it is encoded to Punycode. 
>>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF') 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF' :param iri: The IRI to convert. :param charset: The encoding of the IRI. :param errors: Error handler to use during ``bytes.encode``. :param safe_conversion: Return the URL unchanged if it only contains ASCII characters and no whitespace. See the explanation below. There is a general problem with IRI conversion with some protocols that are in violation of the URI specification. Consider the following two IRIs:: magnet:?xt=uri:whatever itms-services://?action=download-manifest After parsing, we don't know if the scheme requires the ``//``, which is dropped if empty, but conveys different meanings in the final URL if it's present or not. In this case, you can use ``safe_conversion``, which will return the URL unchanged if it only contains ASCII characters and no whitespace. This can result in a URI with unquoted characters if it was not already quoted correctly, but preserves the URL's semantics. Werkzeug uses this for the ``Location`` header for redirects. .. versionchanged:: 0.15 All reserved characters remain unquoted. Previously, only some reserved characters were left unquoted. .. versionchanged:: 0.9.6 The ``safe_conversion`` parameter was added. .. versionadded:: 0.6 """ if isinstance(iri, tuple): iri = url_unparse(iri) if safe_conversion: # If we're not sure if it's safe to convert the URL, and it only # contains ASCII characters, return it unconverted. try: native_iri = to_native(iri) ascii_iri = native_iri.encode("ascii") # Only return if it doesn't have whitespace. (Why?) if len(ascii_iri.split()) == 1: return native_iri except UnicodeError: pass iri = url_parse(to_unicode(iri, charset, errors)) path = url_quote(iri.path, charset, errors, _to_uri_safe) query = url_quote(iri.query, charset, errors, _to_uri_safe) fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe) return to_native( url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment)) ) def url_decode( s, charset="utf-8", decode_keys=False, include_empty=True, errors="replace", separator="&", cls=None, ): """ Parse a querystring and return it as :class:`MultiDict`. There is a difference in key decoding on different Python versions. On Python 3 keys will always be fully decoded whereas on Python 2, keys will remain bytestrings if they fit into ASCII. On 2.x keys can be forced to be unicode by setting `decode_keys` to `True`. If the charset is set to `None` no unicode decoding will happen and raw bytes will be returned. Per default a missing value for a key will default to an empty key. If you don't want that behavior you can set `include_empty` to `False`. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. .. versionchanged:: 0.5 In previous versions ";" and "&" could be used for url decoding. This changed in 0.5 where only "&" is supported. If you want to use ";" instead a different `separator` can be provided. The `cls` parameter was added. :param s: a string with the query string to decode. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param decode_keys: Used on Python 2.x to control whether keys should be forced to be unicode objects. If set to `True` then keys will be unicode in all cases. Otherwise, they remain `str` if they fit into ASCII. 
:param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. """ if cls is None: from .datastructures import MultiDict cls = MultiDict if isinstance(s, text_type) and not isinstance(separator, text_type): separator = separator.decode(charset or "ascii") elif isinstance(s, bytes) and not isinstance(separator, bytes): separator = separator.encode(charset or "ascii") return cls( _url_decode_impl( s.split(separator), charset, decode_keys, include_empty, errors ) ) def url_decode_stream( stream, charset="utf-8", decode_keys=False, include_empty=True, errors="replace", separator="&", cls=None, limit=None, return_iterator=False, ): """Works like :func:`url_decode` but decodes a stream. The behavior of stream and limit follows functions like :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is directly fed to the `cls` so you can consume the data while it's parsed. .. versionadded:: 0.8 :param stream: a stream with the encoded querystring :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param decode_keys: Used on Python 2.x to control whether keys should be forced to be unicode objects. If set to `True`, keys will be unicode in all cases. Otherwise, they remain `str` if they fit into ASCII. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param limit: the content length of the URL data. Not necessary if a limited stream is provided. :param return_iterator: if set to `True` the `cls` argument is ignored and an iterator over all decoded pairs is returned """ from .wsgi import make_chunk_iter pair_iter = make_chunk_iter(stream, separator, limit) decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors) if return_iterator: return decoder if cls is None: from .datastructures import MultiDict cls = MultiDict return cls(decoder) def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors): for pair in pair_iter: if not pair: continue s = make_literal_wrapper(pair) equal = s("=") if equal in pair: key, value = pair.split(equal, 1) else: if not include_empty: continue key = pair value = s("") key = url_unquote_plus(key, charset, errors) if charset is not None and PY2 and not decode_keys: key = try_coerce_native(key) yield key, url_unquote_plus(value, charset, errors) def url_encode( obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&" ): """URL encode a dict/`MultiDict`. If a value is `None` it will not appear in the result string. Per default only values are encoded into the target charset strings. If `encode_keys` is set to ``True`` unicode keys are supported too. If `sort` is set to `True` the items are sorted by `key` or the default sorting algorithm. .. versionadded:: 0.5 `sort`, `key`, and `separator` were added. :param obj: the object to encode into a query string. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. 
(Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation. """ separator = to_native(separator, "ascii") return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key)) def url_encode_stream( obj, stream=None, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&", ): """Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. (Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation. """ separator = to_native(separator, "ascii") gen = _url_encode_impl(obj, charset, encode_keys, sort, key) if stream is None: return gen for idx, chunk in enumerate(gen): if idx: stream.write(separator) stream.write(chunk) def url_join(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter. :param base: the base URL for the join operation. :param url: the URL to join. :param allow_fragments: indicates whether fragments should be allowed. """ if isinstance(base, tuple): base = url_unparse(base) if isinstance(url, tuple): url = url_unparse(url) base, url = normalize_string_tuple((base, url)) s = make_literal_wrapper(base) if not base: return url if not url: return base bscheme, bnetloc, bpath, bquery, bfragment = url_parse( base, allow_fragments=allow_fragments ) scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments) if scheme != bscheme: return url if netloc: return url_unparse((scheme, netloc, path, query, fragment)) netloc = bnetloc if path[:1] == s("/"): segments = path.split(s("/")) elif not path: segments = bpath.split(s("/")) if not query: query = bquery else: segments = bpath.split(s("/"))[:-1] + path.split(s("/")) # If the rightmost part is "./" we want to keep the slash but # remove the dot. if segments[-1] == s("."): segments[-1] = s("") # Resolve ".." and "." segments = [segment for segment in segments if segment != s(".")] while 1: i = 1 n = len(segments) - 1 while i < n: if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")): del segments[i - 1 : i + 1] break i += 1 else: break # Remove trailing ".." if the URL is absolute unwanted_marker = [s(""), s("..")] while segments[:2] == unwanted_marker: del segments[1] path = s("/").join(segments) return url_unparse((scheme, netloc, path, query, fragment)) class Href(object): """Implements a callable that constructs URLs with the given base. The function can be called with any number of positional and keyword arguments which than are used to assemble the URL. Works with URLs and posix paths. 
Positional arguments are appended as individual segments to the path of the URL: >>> href = Href('/foo') >>> href('bar', 23) '/foo/bar/23' >>> href('foo', bar=23) '/foo/foo?bar=23' If any of the arguments (positional or keyword) evaluates to `None` it will be skipped. If no keyword arguments are given the last argument can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass), otherwise the keyword arguments are used for the query parameters, cutting off the first trailing underscore of the parameter name: >>> href(is_=42) '/foo?is=42' >>> href({'foo': 'bar'}) '/foo?foo=bar' Combining of both methods is not allowed: >>> href({'foo': 'bar'}, bar=42) Traceback (most recent call last): ... TypeError: keyword arguments and query-dicts can't be combined Accessing attributes on the href object creates a new href object with the attribute name as prefix: >>> bar_href = href.bar >>> bar_href("blub") '/foo/bar/blub' If `sort` is set to `True` the items are sorted by `key` or the default sorting algorithm: >>> href = Href("/", sort=True) >>> href(a=1, b=2, c=3) '/?a=1&b=2&c=3' .. versionadded:: 0.5 `sort` and `key` were added. """ def __init__(self, base="./", charset="utf-8", sort=False, key=None): if not base: base = "./" self.base = base self.charset = charset self.sort = sort self.key = key def __getattr__(self, name): if name[:2] == "__": raise AttributeError(name) base = self.base if base[-1:] != "/": base += "/" return Href(url_join(base, name), self.charset, self.sort, self.key) def __call__(self, *path, **query): if path and isinstance(path[-1], dict): if query: raise TypeError("keyword arguments and query-dicts can't be combined") query, path = path[-1], path[:-1] elif query: query = dict( [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()] ) path = "/".join( [ to_unicode(url_quote(x, self.charset), "ascii") for x in path if x is not None ] ).lstrip("/") rv = self.base if path: if not rv.endswith("/"): rv += "/" rv = url_join(rv, "./" + path) if query: rv += "?" + to_unicode( url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii" ) return to_native(rv)
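An illustrative usage sketch (added for this document, not part of the original werkzeug/urls.py) showing how the helpers above compose. It assumes Werkzeug is installed and runs under Python 3::

    # Hypothetical demo script exercising the URL helpers defined above.
    from werkzeug.urls import Href, iri_to_uri, uri_to_iri, url_join, url_parse

    # url_parse splits a URL into a URL tuple with named attributes.
    url = url_parse("http://example.com/app/index?page=2#top")
    assert url.scheme == "http" and url.query == "page=2" and url.fragment == "top"

    # url_join resolves a relative reference against a base URL.
    print(url_join("http://example.com/app/", "../static/style.css"))
    # -> http://example.com/static/style.css

    # iri_to_uri / uri_to_iri round-trip non-ASCII URLs through
    # percent-encoding and Punycode (values match the docstring examples).
    uri = iri_to_uri(u"http://\u2603.net/p\xe5th")
    print(uri)              # http://xn--n3h.net/p%C3%A5th
    print(uri_to_iri(uri))  # back to the unquoted IRI form

    # Href builds URLs incrementally from a base path.
    api = Href("/api", sort=True)
    print(api("users", 42, format="json"))  # /api/users/42?format=json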
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/wsgi.py
# -*- coding: utf-8 -*- """ werkzeug.wsgi ~~~~~~~~~~~~~ This module implements WSGI related helpers. :copyright: 2007 Pallets :license: BSD-3-Clause """ import io import re from functools import partial from functools import update_wrapper from itertools import chain from ._compat import BytesIO from ._compat import implements_iterator from ._compat import make_literal_wrapper from ._compat import string_types from ._compat import text_type from ._compat import to_bytes from ._compat import to_unicode from ._compat import try_coerce_native from ._compat import wsgi_get_bytes from ._internal import _encode_idna from .urls import uri_to_iri from .urls import url_join from .urls import url_parse from .urls import url_quote def responder(f): """Marks a function as a responder. Decorate a function with it and it will automatically call the return value as a WSGI application. Example:: @responder def application(environ, start_response): return Response('Hello World!') """ return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) def get_current_url( environ, root_only=False, strip_querystring=False, host_only=False, trusted_hosts=None, ): """A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here's an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' Optionally, it verifies that the host is in a list of trusted hosts. If the host is not in that list it will raise a :exc:`~werkzeug.exceptions.SecurityError`. Note that the string returned might contain unicode characters as the representation is an IRI, not a URI. If you need an ASCII-only representation you can use the :func:`~werkzeug.urls.iri_to_uri` function: >>> from werkzeug.urls import iri_to_uri >>> iri_to_uri(get_current_url(env)) 'http://localhost/script/?param=foo' :param environ: the WSGI environment to get the current URL from. :param root_only: set to `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)] cat = tmp.append if host_only: return uri_to_iri("".join(tmp) + "/") cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/")) cat("/") if not root_only: cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/"))) if not strip_querystring: qs = get_query_string(environ) if qs: cat("?" + qs) return uri_to_iri("".join(tmp)) def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well.
""" if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ":" in hostname: hostname = hostname.rsplit(":", 1)[0] return _encode_idna(hostname) try: hostname = _normalize(hostname) except UnicodeError: return False for ref in trusted_list: if ref.startswith("."): ref = ref[1:] suffix_match = True else: suffix_match = False try: ref = _normalize(ref) except UnicodeError: return False if ref == hostname: return True if suffix_match and hostname.endswith(b"." + ref): return True return False def get_host(environ, trusted_hosts=None): """Return the host for the given WSGI environment. This first checks the ``Host`` header. If it's not present, then ``SERVER_NAME`` and ``SERVER_PORT`` are used. The host will only contain the port if it is different than the standard port for the protocol. Optionally, verify that the host is trusted using :func:`host_is_trusted` and raise a :exc:`~werkzeug.exceptions.SecurityError` if it is not. :param environ: The WSGI environment to get the host from. :param trusted_hosts: A list of trusted hosts. :return: Host, with port if necessary. :raise ~werkzeug.exceptions.SecurityError: If the host is not trusted. """ if "HTTP_HOST" in environ: rv = environ["HTTP_HOST"] if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"): rv = rv[:-3] elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"): rv = rv[:-4] else: rv = environ["SERVER_NAME"] if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in ( ("https", "443"), ("http", "80"), ): rv += ":" + environ["SERVER_PORT"] if trusted_hosts is not None: if not host_is_trusted(rv, trusted_hosts): from .exceptions import SecurityError raise SecurityError('Host "%s" is not trusted' % rv) return rv def get_content_length(environ): """Returns the content length from the WSGI environment as integer. If it's not available or chunked transfer encoding is used, ``None`` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from. """ if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked": return None content_length = environ.get("CONTENT_LENGTH") if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass def get_input_stream(environ, safe_fallback=True): """Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. If content length is not set, the stream will be empty for safety reasons. If the WSGI server supports chunked or infinite streams, it should set the ``wsgi.input_terminated`` value in the WSGI environ to indicate that. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe_fallback: use an empty stream as a safe fallback when the content length is not set. Disabling this allows infinite streams, which can be a denial-of-service risk. """ stream = environ["wsgi.input"] content_length = get_content_length(environ) # A wsgi extension that tells us if the input is terminated. In # that case we return the stream unchanged as we know we can safely # read it until the end. if environ.get("wsgi.input_terminated"): return stream # If the request doesn't specify a content length, returning the stream is # potentially dangerous because it could be infinite, malicious or not. 
If # safe_fallback is true, return an empty stream instead for safety. if content_length is None: return BytesIO() if safe_fallback else stream # Otherwise limit the stream to the content length return LimitedStream(stream, content_length) def get_query_string(environ): """Returns the `QUERY_STRING` from the WSGI environment. This also takes care of the WSGI decoding dance on Python 3 environments, returning a native string. The string returned will be restricted to ASCII characters. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the query string from. """ qs = wsgi_get_bytes(environ.get("QUERY_STRING", "")) # QUERY_STRING really should be ascii safe but some browsers # will send us some unicode stuff (I am looking at you IE). # In that case we want to urllib quote it badly. return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),")) def get_path_info(environ, charset="utf-8", errors="replace"): """Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care of the WSGI decoding dance on Python 3 environments. If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get("PATH_INFO", "")) return to_unicode(path, charset, errors, allow_none_charset=True) def get_script_name(environ, charset="utf-8", errors="replace"): """Returns the `SCRIPT_NAME` from the WSGI environment and properly decodes it. This also takes care of the WSGI decoding dance on Python 3 environments. If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get("SCRIPT_NAME", "")) return to_unicode(path, charset, errors, allow_none_charset=True) def pop_path_info(environ, charset="utf-8", errors="replace"): """Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified.
""" path = environ.get("PATH_INFO") if not path: return None script_name = environ.get("SCRIPT_NAME", "") # shift multiple leading slashes over old_path = path path = path.lstrip("/") if path != old_path: script_name += "/" * (len(old_path) - len(path)) if "/" not in path: environ["PATH_INFO"] = "" environ["SCRIPT_NAME"] = script_name + path rv = wsgi_get_bytes(path) else: segment, path = path.split("/", 1) environ["PATH_INFO"] = "/" + path environ["SCRIPT_NAME"] = script_name + segment rv = wsgi_get_bytes(segment) return to_unicode(rv, charset, errors, allow_none_charset=True) def peek_path_info(environ, charset="utf-8", errors="replace"): """Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked. """ segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1) if segments: return to_unicode( wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True ) def extract_path_info( environ_or_baseurl, path_or_url, charset="utf-8", errors="werkzeug.url_quote", collapse_http_schemes=True, ): """Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a unicode string, not a bytestring suitable for a WSGI environment. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. Also accepts IRIs and unicode parameters. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource. .. versionchanged:: 0.15 The ``errors`` parameter defaults to leaving invalid bytes quoted instead of replacing them. .. 
versionadded:: 0.6 """ def _normalize_netloc(scheme, netloc): parts = netloc.split(u"@", 1)[-1].split(u":", 1) if len(parts) == 2: netloc, port = parts if (scheme == u"http" and port == u"80") or ( scheme == u"https" and port == u"443" ): port = None else: netloc = parts[0] port = None if port is not None: netloc += u":" + port return netloc # make sure whatever we are working on is a IRI and parse it path = uri_to_iri(path_or_url, charset, errors) if isinstance(environ_or_baseurl, dict): environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True) base_iri = uri_to_iri(environ_or_baseurl, charset, errors) base_scheme, base_netloc, base_path = url_parse(base_iri)[:3] cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3] # normalize the network location base_netloc = _normalize_netloc(base_scheme, base_netloc) cur_netloc = _normalize_netloc(cur_scheme, cur_netloc) # is that IRI even on a known HTTP scheme? if collapse_http_schemes: for scheme in base_scheme, cur_scheme: if scheme not in (u"http", u"https"): return None else: if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme): return None # are the netlocs compatible? if base_netloc != cur_netloc: return None # are we below the application path? base_path = base_path.rstrip(u"/") if not cur_path.startswith(base_path): return None return u"/" + cur_path[len(base_path) :].lstrip(u"/") @implements_iterator class ClosingIterator(object): """The WSGI specification requires that all middlewares and gateways respect the `close` callback of the iterable returned by the application. Because it is useful to add another close action to a returned iterable and adding a custom iterable is a boring task this class can be used for that:: return ClosingIterator(app(environ, start_response), [cleanup_session, cleanup_locals]) If there is just one close function it can be passed instead of the list. A closing iterator is not needed if the application uses response objects and finishes the processing if the response is started:: try: return response(environ, start_response) finally: cleanup_session() cleanup_locals() """ def __init__(self, iterable, callbacks=None): iterator = iter(iterable) self._next = partial(next, iterator) if callbacks is None: callbacks = [] elif callable(callbacks): callbacks = [callbacks] else: callbacks = list(callbacks) iterable_close = getattr(iterable, "close", None) if iterable_close: callbacks.insert(0, iterable_close) self._callbacks = callbacks def __iter__(self): return self def __next__(self): return self._next() def close(self): for callback in self._callbacks: callback() def wrap_file(environ, file, buffer_size=8192): """Wraps a file. This uses the WSGI server's file wrapper if available or otherwise the generic :class:`FileWrapper`. .. versionadded:: 0.5 If the file wrapper from the WSGI server is used it's important to not iterate over it from inside the application but to pass it through unchanged. If you want to pass out a file wrapper inside a response object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. More information about file wrappers are available in :pep:`333`. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. """ return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size) @implements_iterator class FileWrapper(object): """This class can be used to convert a :class:`file`-like object into an iterable. 
It yields `buffer_size` blocks until the file is fully read. You should not use this class directly but rather use the :func:`wrap_file` function that uses the WSGI server's file wrapper support if it's available. .. versionadded:: 0.5 If you're using this object together with a :class:`BaseResponse` you have to use the `direct_passthrough` mode. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. """ def __init__(self, file, buffer_size=8192): self.file = file self.buffer_size = buffer_size def close(self): if hasattr(self.file, "close"): self.file.close() def seekable(self): if hasattr(self.file, "seekable"): return self.file.seekable() if hasattr(self.file, "seek"): return True return False def seek(self, *args): if hasattr(self.file, "seek"): self.file.seek(*args) def tell(self): if hasattr(self.file, "tell"): return self.file.tell() return None def __iter__(self): return self def __next__(self): data = self.file.read(self.buffer_size) if data: return data raise StopIteration() @implements_iterator class _RangeWrapper(object): # private for now, but should we make it public in the future ? """This class can be used to convert an iterable object into an iterable that will only yield a piece of the underlying content. It yields blocks until the underlying stream range is fully read. The yielded blocks will have a size that can't exceed the original iterator defined block size, but that can be smaller. If you're using this object together with a :class:`BaseResponse` you have to use the `direct_passthrough` mode. :param iterable: an iterable object with a :meth:`__next__` method. :param start_byte: byte from which read will start. :param byte_range: how many bytes to read. 
""" def __init__(self, iterable, start_byte=0, byte_range=None): self.iterable = iter(iterable) self.byte_range = byte_range self.start_byte = start_byte self.end_byte = None if byte_range is not None: self.end_byte = self.start_byte + self.byte_range self.read_length = 0 self.seekable = hasattr(iterable, "seekable") and iterable.seekable() self.end_reached = False def __iter__(self): return self def _next_chunk(self): try: chunk = next(self.iterable) self.read_length += len(chunk) return chunk except StopIteration: self.end_reached = True raise def _first_iteration(self): chunk = None if self.seekable: self.iterable.seek(self.start_byte) self.read_length = self.iterable.tell() contextual_read_length = self.read_length else: while self.read_length <= self.start_byte: chunk = self._next_chunk() if chunk is not None: chunk = chunk[self.start_byte - self.read_length :] contextual_read_length = self.start_byte return chunk, contextual_read_length def _next(self): if self.end_reached: raise StopIteration() chunk = None contextual_read_length = self.read_length if self.read_length == 0: chunk, contextual_read_length = self._first_iteration() if chunk is None: chunk = self._next_chunk() if self.end_byte is not None and self.read_length >= self.end_byte: self.end_reached = True return chunk[: self.end_byte - contextual_read_length] return chunk def __next__(self): chunk = self._next() if chunk: return chunk self.end_reached = True raise StopIteration() def close(self): if hasattr(self.iterable, "close"): self.iterable.close() def _make_chunk_iter(stream, limit, buffer_size): """Helper for the line and chunk iter functions.""" if isinstance(stream, (bytes, bytearray, text_type)): raise TypeError( "Passed a string or byte object instead of true iterator or stream." ) if not hasattr(stream, "read"): for item in stream: if item: yield item return if not isinstance(stream, LimitedStream) and limit is not None: stream = LimitedStream(stream, limit) _read = stream.read while 1: item = _read(buffer_size) if not item: break yield item def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False): """Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposite to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterate to iterate over. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is a :class:`LimitedStream`. :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. Internally this is implemented that the buffer size might be exhausted by a factor of two however. 
""" _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, "") if not first_item: return s = make_literal_wrapper(first_item) empty = s("") cr = s("\r") lf = s("\n") crlf = s("\r\n") _iter = chain((first_item,), _iter) def _iter_basic_lines(): _join = empty.join buffer = [] while 1: new_data = next(_iter, "") if not new_data: break new_buf = [] buf_size = 0 for item in chain(buffer, new_data.splitlines(True)): new_buf.append(item) buf_size += len(item) if item and item[-1:] in crlf: yield _join(new_buf) new_buf = [] elif cap_at_buffer and buf_size >= buffer_size: rv = _join(new_buf) while len(rv) >= buffer_size: yield rv[:buffer_size] rv = rv[buffer_size:] new_buf = [rv] buffer = new_buf if buffer: yield _join(buffer) # This hackery is necessary to merge 'foo\r' and '\n' into one item # of 'foo\r\n' if we were unlucky and we hit a chunk boundary. previous = empty for item in _iter_basic_lines(): if item == lf and previous[-1:] == cr: previous += item item = empty if previous: yield previous previous = item if previous: yield previous def make_chunk_iter( stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False ): """Works like :func:`make_line_iter` but accepts a separator which divides chunks. If you want newline based processing you should use :func:`make_line_iter` instead as it supports arbitrary newline markers. .. versionadded:: 0.8 .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterate to iterate over. :param separator: the separator that divides chunks. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is otherwise already limited). :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. Internally this is implemented that the buffer size might be exhausted by a factor of two however. """ _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, "") if not first_item: return _iter = chain((first_item,), _iter) if isinstance(first_item, text_type): separator = to_unicode(separator) _split = re.compile(r"(%s)" % re.escape(separator)).split _join = u"".join else: separator = to_bytes(separator) _split = re.compile(b"(" + re.escape(separator) + b")").split _join = b"".join buffer = [] while 1: new_data = next(_iter, "") if not new_data: break chunks = _split(new_data) new_buf = [] buf_size = 0 for item in chain(buffer, chunks): if item == separator: yield _join(new_buf) new_buf = [] buf_size = 0 else: buf_size += len(item) new_buf.append(item) if cap_at_buffer and buf_size >= buffer_size: rv = _join(new_buf) while len(rv) >= buffer_size: yield rv[:buffer_size] rv = rv[buffer_size:] new_buf = [rv] buf_size = len(rv) buffer = new_buf if buffer: yield _join(buffer) @implements_iterator class LimitedStream(io.IOBase): """Wraps a stream so that it doesn't read more than n bytes. If the stream is exhausted and the caller tries to get more bytes from it :func:`on_exhausted` is called which by default returns an empty string. The return value of that function is forwarded to the reader function. So if it returns an empty string :meth:`read` will return an empty string as well. The limit however must never be higher than what the stream can output. Otherwise :meth:`readlines` will try to read past the limit. .. 
admonition:: Note on WSGI compliance Calls to :meth:`readline` and :meth:`readlines` are not WSGI compliant because they pass a size argument to the readline methods. Unfortunately the WSGI PEP is not safely implementable without a size argument to :meth:`readline` because there is no EOF marker in the stream. As a result, the use of :meth:`readline` is discouraged. For the same reason iterating over the :class:`LimitedStream` is not portable. It internally calls :meth:`readline`. We strongly suggest using :meth:`read` only or using :func:`make_line_iter`, which safely iterates line by line over a WSGI input stream. :param stream: the stream to wrap. :param limit: the limit for the stream, must not be longer than what the stream can provide if the stream does not end with `EOF` (like `wsgi.input`) """ def __init__(self, stream, limit): self._read = stream.read self._readline = stream.readline self._pos = 0 self.limit = limit def __iter__(self): return self @property def is_exhausted(self): """If the stream is exhausted this attribute is `True`.""" return self._pos >= self.limit def on_exhausted(self): """This is called when the stream tries to read past the limit. The return value of this function is returned from the reading function. """ # Read null bytes from the stream so that we get the # correct end of stream marker. return self._read(0) def on_disconnect(self): """What should happen if a disconnect is detected? The return value of this function is returned from read functions in case the client went away. By default a :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised. """ from .exceptions import ClientDisconnected raise ClientDisconnected() def exhaust(self, chunk_size=1024 * 64): """Exhaust the stream. This consumes all the data left until the limit is reached. :param chunk_size: the size for a chunk. It will read chunks of this size until the stream is exhausted and throw away the results. """ to_read = self.limit - self._pos chunk = chunk_size while to_read > 0: chunk = min(to_read, chunk) self.read(chunk) to_read -= chunk def read(self, size=None): """Read `size` bytes or, if size is not provided, everything. :param size: the number of bytes to read. """ if self._pos >= self.limit: return self.on_exhausted() if size is None or size == -1: # -1 is for consistency with file objects size = self.limit to_read = min(self.limit - self._pos, size) try: read = self._read(to_read) except (IOError, ValueError): return self.on_disconnect() if to_read and len(read) != to_read: return self.on_disconnect() self._pos += len(read) return read def readline(self, size=None): """Reads one line from the stream.""" if self._pos >= self.limit: return self.on_exhausted() if size is None: size = self.limit - self._pos else: size = min(size, self.limit - self._pos) try: line = self._readline(size) except (ValueError, IOError): return self.on_disconnect() if size and not line: return self.on_disconnect() self._pos += len(line) return line def readlines(self, size=None): """Reads a file into a list of strings. It calls :meth:`readline` until the file is read to the end. It does support the optional `size` argument if the underlying stream supports it for `readline`.
""" last_pos = self._pos result = [] if size is not None: end = min(self.limit, last_pos + size) else: end = self.limit while 1: if size is not None: size -= last_pos - self._pos if self._pos >= end: break result.append(self.readline(size)) if size is not None: last_pos = self._pos return result def tell(self): """Returns the position of the stream. .. versionadded:: 0.9 """ return self._pos def __next__(self): line = self.readline() if not line: raise StopIteration() return line def readable(self): return True
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/testapp.py
# -*- coding: utf-8 -*- """ werkzeug.testapp ~~~~~~~~~~~~~~~~ Provide a small test application that can be used to test a WSGI server and check it for WSGI compliance. :copyright: 2007 Pallets :license: BSD-3-Clause """ import base64 import os import sys from textwrap import wrap from . import __version__ as _werkzeug_version from .utils import escape from .wrappers import BaseRequest as Request from .wrappers import BaseResponse as Response logo = Response( base64.b64decode( """ R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP///////// //////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25 7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf 78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45 Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8 ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64 gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9 YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2 KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs =""" ), mimetype="image/png", ) TEMPLATE = u"""\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <title>WSGI Information</title> <style type="text/css"> @import url(https://fonts.googleapis.com/css?family=Ubuntu); body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; background-color: white; color: #000; font-size: 15px; text-align: center; } #logo { float: right; padding: 0 0 10px 
10px; } div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0; background-color: white; } h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-weight: normal; } h1 { margin: 0 0 30px 0; } h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; } table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 } table th { background-color: #AFC1C4; color: white; font-size: 0.72em; font-weight: normal; width: 18em; vertical-align: top; padding: 0.5em 0 0.1em 0.5em; } table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; } code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono', monospace; font-size: 0.7em; } ul li { line-height: 1.5em; } ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px; list-style: none; background: #E8EFF0; } ul.path li { line-height: 1.6em; } li.virtual { color: #999; text-decoration: underline; } li.exp { background: white; } </style> <div class="box"> <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" /> <h1>WSGI Information</h1> <p> This page displays all available information about the WSGI server and the underlying Python interpreter. <h2 id="python-interpreter">Python Interpreter</h2> <table> <tr> <th>Python Version <td>%(python_version)s <tr> <th>Platform <td>%(platform)s [%(os)s] <tr> <th>API Version <td>%(api_version)s <tr> <th>Byteorder <td>%(byteorder)s <tr> <th>Werkzeug Version <td>%(werkzeug_version)s </table> <h2 id="wsgi-environment">WSGI Environment</h2> <table>%(wsgi_env)s</table> <h2 id="installed-eggs">Installed Eggs</h2> <p> The following python packages were installed on the system as Python eggs: <ul>%(python_eggs)s</ul> <h2 id="sys-path">System Path</h2> <p> The following paths are the current contents of the load path. The following entries are looked up for Python packages. Note that not all items in this path are folders. Gray and underlined items are entries pointing to invalid resources or used by custom import hooks such as the zip importer. <p> Items with a bright background were expanded for display from a relative path. If you encounter such paths in the output you might want to check your setup as relative paths are usually problematic in multithreaded environments. 
<ul class="path">%(sys_path)s</ul> </div> """ def iter_sys_path(): if os.name == "posix": def strip(x): prefix = os.path.expanduser("~") if x.startswith(prefix): x = "~" + x[len(prefix) :] return x else: def strip(x): return x cwd = os.path.abspath(os.getcwd()) for item in sys.path: path = os.path.join(cwd, item or os.path.curdir) yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item def render_testapp(req): try: import pkg_resources except ImportError: eggs = () else: eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower()) python_eggs = [] for egg in eggs: try: version = egg.version except (ValueError, AttributeError): version = "unknown" python_eggs.append( "<li>%s <small>[%s]</small>" % (escape(egg.project_name), escape(version)) ) wsgi_env = [] sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower()) for key, value in sorted_environ: wsgi_env.append( "<tr><th>%s<td><code>%s</code>" % (escape(str(key)), " ".join(wrap(escape(repr(value))))) ) sys_path = [] for item, virtual, expanded in iter_sys_path(): class_ = [] if virtual: class_.append("virtual") if expanded: class_.append("exp") sys_path.append( "<li%s>%s" % (' class="%s"' % " ".join(class_) if class_ else "", escape(item)) ) return ( TEMPLATE % { "python_version": "<br>".join(escape(sys.version).splitlines()), "platform": escape(sys.platform), "os": escape(os.name), "api_version": sys.api_version, "byteorder": sys.byteorder, "werkzeug_version": _werkzeug_version, "python_eggs": "\n".join(python_eggs), "wsgi_env": "\n".join(wsgi_env), "sys_path": "\n".join(sys_path), } ).encode("utf-8") def test_app(environ, start_response): """Simple test application that dumps the environment. You can use it to check if Werkzeug is working properly: .. sourcecode:: pycon >>> from werkzeug.serving import run_simple >>> from werkzeug.testapp import test_app >>> run_simple('localhost', 3000, test_app) * Running on http://localhost:3000/ The application displays important information from the WSGI environment, the Python interpreter and the installed libraries. """ req = Request(environ, populate_request=False) if req.args.get("resource") == "logo": response = logo else: response = Response(render_testapp(req), mimetype="text/html") return response(environ, start_response) if __name__ == "__main__": from .serving import run_simple run_simple("localhost", 5000, test_app, use_reloader=True)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/dispatcher.py
""" Application Dispatcher ====================== This middleware creates a single WSGI application that dispatches to multiple other WSGI applications mounted at different URL paths. A common example is writing a Single Page Application, where you have a backend API and a frontend written in JavaScript that does the routing in the browser rather than requesting different pages from the server. The frontend is a single HTML and JS file that should be served for any path besides "/api". This example dispatches to an API app under "/api", an admin app under "/admin", and an app that serves frontend files for all other requests:: app = DispatcherMiddleware(serve_frontend, { '/api': api_app, '/admin': admin_app, }) In production, you might instead handle this at the HTTP server level, serving files or proxying to application servers based on location. The API and admin apps would each be deployed with a separate WSGI server, and the static files would be served directly by the HTTP server. .. autoclass:: DispatcherMiddleware :copyright: 2007 Pallets :license: BSD-3-Clause """ class DispatcherMiddleware(object): """Combine multiple applications as a single WSGI application. Requests are dispatched to an application based on the path it is mounted under. :param app: The WSGI application to dispatch to if the request doesn't match a mounted path. :param mounts: Maps path prefixes to applications for dispatching. """ def __init__(self, app, mounts=None): self.app = app self.mounts = mounts or {} def __call__(self, environ, start_response): script = environ.get("PATH_INFO", "") path_info = "" while "/" in script: if script in self.mounts: app = self.mounts[script] break script, last_item = script.rsplit("/", 1) path_info = "/%s%s" % (last_item, path_info) else: app = self.mounts.get(script, self.app) original_script_name = environ.get("SCRIPT_NAME", "") environ["SCRIPT_NAME"] = original_script_name + script environ["PATH_INFO"] = path_info return app(environ, start_response)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/proxy_fix.py
""" X-Forwarded-For Proxy Fix ========================= This module provides a middleware that adjusts the WSGI environ based on ``X-Forwarded-`` headers that proxies in front of an application may set. When an application is running behind a proxy server, WSGI may see the request as coming from that server rather than the real client. Proxies set various headers to track where the request actually came from. This middleware should only be applied if the application is actually behind such a proxy, and should be configured with the number of proxies that are chained in front of it. Not all proxies set all the headers. Since incoming headers can be faked, you must set how many proxies are setting each header so the middleware knows what to trust. .. autoclass:: ProxyFix :copyright: 2007 Pallets :license: BSD-3-Clause """ from werkzeug.http import parse_list_header class ProxyFix(object): """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in front of the application may set. - ``X-Forwarded-For`` sets ``REMOTE_ADDR``. - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``. - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and ``SERVER_PORT``. - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``. - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``. You must tell the middleware how many proxies set each header so it knows what values to trust. It is a security issue to trust values that came from the client rather than a proxy. The original values of the headers are stored in the WSGI environ as ``werkzeug.proxy_fix.orig``, a dict. :param app: The WSGI application to wrap. :param x_for: Number of values to trust for ``X-Forwarded-For``. :param x_proto: Number of values to trust for ``X-Forwarded-Proto``. :param x_host: Number of values to trust for ``X-Forwarded-Host``. :param x_port: Number of values to trust for ``X-Forwarded-Port``. :param x_prefix: Number of values to trust for ``X-Forwarded-Prefix``. .. code-block:: python from werkzeug.middleware.proxy_fix import ProxyFix # App is behind one proxy that sets the -For and -Host headers. app = ProxyFix(app, x_for=1, x_host=1) .. versionchanged:: 1.0 Deprecated code has been removed: * The ``num_proxies`` argument and attribute. * The ``get_remote_addr`` method. * The environ keys ``orig_remote_addr``, ``orig_wsgi_url_scheme``, and ``orig_http_host``. .. versionchanged:: 0.15 All headers support multiple values. The ``num_proxies`` argument is deprecated. Each header is configured with a separate number of trusted proxies. .. versionchanged:: 0.15 Original WSGI environ values are stored in the ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``, ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated and will be removed in 1.0. .. versionchanged:: 0.15 Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``. .. versionchanged:: 0.15 ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify ``SERVER_NAME`` and ``SERVER_PORT``. """ def __init__(self, app, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0): self.app = app self.x_for = x_for self.x_proto = x_proto self.x_host = x_host self.x_port = x_port self.x_prefix = x_prefix def _get_real_value(self, trusted, value): """Get the real value from a list header based on the configured number of trusted proxies. :param trusted: Number of values to trust in the header. :param value: Comma separated list header value to parse. :return: The real value, or ``None`` if there are fewer values than the number of trusted proxies. .. 
versionchanged:: 1.0 Renamed from ``_get_trusted_comma``. .. versionadded:: 0.15 """ if not (trusted and value): return values = parse_list_header(value) if len(values) >= trusted: return values[-trusted] def __call__(self, environ, start_response): """Modify the WSGI environ based on the various ``Forwarded`` headers before calling the wrapped application. Store the original environ values in ``werkzeug.proxy_fix.orig_{key}``. """ environ_get = environ.get orig_remote_addr = environ_get("REMOTE_ADDR") orig_wsgi_url_scheme = environ_get("wsgi.url_scheme") orig_http_host = environ_get("HTTP_HOST") environ.update( { "werkzeug.proxy_fix.orig": { "REMOTE_ADDR": orig_remote_addr, "wsgi.url_scheme": orig_wsgi_url_scheme, "HTTP_HOST": orig_http_host, "SERVER_NAME": environ_get("SERVER_NAME"), "SERVER_PORT": environ_get("SERVER_PORT"), "SCRIPT_NAME": environ_get("SCRIPT_NAME"), } } ) x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR")) if x_for: environ["REMOTE_ADDR"] = x_for x_proto = self._get_real_value( self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO") ) if x_proto: environ["wsgi.url_scheme"] = x_proto x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST")) if x_host: environ["HTTP_HOST"] = x_host parts = x_host.split(":", 1) environ["SERVER_NAME"] = parts[0] if len(parts) == 2: environ["SERVER_PORT"] = parts[1] x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT")) if x_port: host = environ.get("HTTP_HOST") if host: parts = host.split(":", 1) host = parts[0] if len(parts) == 2 else host environ["HTTP_HOST"] = "%s:%s" % (host, x_port) environ["SERVER_PORT"] = x_port x_prefix = self._get_real_value( self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX") ) if x_prefix: environ["SCRIPT_NAME"] = x_prefix return self.app(environ, start_response)
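A minimal sketch of the trust logic (not part of the module); the addresses and header values are invented, and ``werkzeug.test`` simulates a request that passed through one proxy::

    from werkzeug.middleware.proxy_fix import ProxyFix
    from werkzeug.test import create_environ, run_wsgi_app
    from werkzeug.wrappers import Response

    def app(environ, start_response):
        # Echo what the application sees after ProxyFix ran.
        body = "%s %s" % (environ["REMOTE_ADDR"], environ["wsgi.url_scheme"])
        return Response(body)(environ, start_response)

    # Trust one proxy for -For and -Proto (the defaults).
    wrapped = ProxyFix(app, x_for=1, x_proto=1)

    environ = create_environ("/", "http://example.com/")
    environ["REMOTE_ADDR"] = "10.0.0.1"            # the proxy's address
    environ["HTTP_X_FORWARDED_FOR"] = "192.0.2.7"  # the real client
    environ["HTTP_X_FORWARDED_PROTO"] = "https"

    app_iter, status, headers = run_wsgi_app(wrapped, environ)
    print(b"".join(app_iter))  # b'192.0.2.7 https'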
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/shared_data.py
""" Serve Shared Static Files ========================= .. autoclass:: SharedDataMiddleware :members: is_allowed :copyright: 2007 Pallets :license: BSD-3-Clause """ import mimetypes import os import pkgutil import posixpath from datetime import datetime from io import BytesIO from time import mktime from time import time from zlib import adler32 from .._compat import PY2 from .._compat import string_types from ..filesystem import get_filesystem_encoding from ..http import http_date from ..http import is_resource_modified from ..security import safe_join from ..utils import get_content_type from ..wsgi import get_path_info from ..wsgi import wrap_file class SharedDataMiddleware(object): """A WSGI middleware that provides static content for development environments or simple server setups. Usage is quite simple:: import os from werkzeug.middleware.shared_data import SharedDataMiddleware app = SharedDataMiddleware(app, { '/static': os.path.join(os.path.dirname(__file__), 'static') }) The contents of the folder ``./shared`` will now be available on ``http://example.com/shared/``. This is pretty useful during development because a standalone media server is not required. One can also mount files on the root folder and still continue to use the application because the shared data middleware forwards all unhandled requests to the application, even if the requests are below one of the shared folders. If `pkg_resources` is available you can also tell the middleware to serve files from package data:: app = SharedDataMiddleware(app, { '/static': ('myapplication', 'static') }) This will then serve the ``static`` folder in the `myapplication` Python package. The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` rules for files that are not accessible from the web. If `cache` is set to `False` no caching headers are sent. Currently the middleware does not support non ASCII filenames. If the encoding on the file system happens to be the encoding of the URI it may work but this could also be by accident. We strongly suggest using ASCII only file names for static files. The middleware will guess the mimetype using the Python `mimetype` module. If it's unable to figure out the charset it will fall back to `fallback_mimetype`. :param app: the application to wrap. If you don't want to wrap an application you can pass it :exc:`NotFound`. :param exports: a list or dict of exported files and folders. :param disallow: a list of :func:`~fnmatch.fnmatch` rules. :param cache: enable or disable caching headers. :param cache_timeout: the cache timeout in seconds for the headers. :param fallback_mimetype: The fallback mimetype for unknown files. .. versionchanged:: 1.0 The default ``fallback_mimetype`` is ``application/octet-stream``. If a filename looks like a text mimetype, the ``utf-8`` charset is added to it. .. versionadded:: 0.6 Added ``fallback_mimetype``. .. versionchanged:: 0.5 Added ``cache_timeout``. 
""" def __init__( self, app, exports, disallow=None, cache=True, cache_timeout=60 * 60 * 12, fallback_mimetype="application/octet-stream", ): self.app = app self.exports = [] self.cache = cache self.cache_timeout = cache_timeout if hasattr(exports, "items"): exports = exports.items() for key, value in exports: if isinstance(value, tuple): loader = self.get_package_loader(*value) elif isinstance(value, string_types): if os.path.isfile(value): loader = self.get_file_loader(value) else: loader = self.get_directory_loader(value) else: raise TypeError("unknown def %r" % value) self.exports.append((key, loader)) if disallow is not None: from fnmatch import fnmatch self.is_allowed = lambda x: not fnmatch(x, disallow) self.fallback_mimetype = fallback_mimetype def is_allowed(self, filename): """Subclasses can override this method to disallow the access to certain files. However by providing `disallow` in the constructor this method is overwritten. """ return True def _opener(self, filename): return lambda: ( open(filename, "rb"), datetime.utcfromtimestamp(os.path.getmtime(filename)), int(os.path.getsize(filename)), ) def get_file_loader(self, filename): return lambda x: (os.path.basename(filename), self._opener(filename)) def get_package_loader(self, package, package_path): loadtime = datetime.utcnow() provider = pkgutil.get_loader(package) if hasattr(provider, "get_resource_reader"): # Python 3 reader = provider.get_resource_reader(package) def loader(path): if path is None: return None, None path = safe_join(package_path, path) basename = posixpath.basename(path) try: resource = reader.open_resource(path) except IOError: return None, None if isinstance(resource, BytesIO): return ( basename, lambda: (resource, loadtime, len(resource.getvalue())), ) return ( basename, lambda: ( resource, datetime.utcfromtimestamp(os.path.getmtime(resource.name)), os.path.getsize(resource.name), ), ) else: # Python 2 package_filename = provider.get_filename(package) is_filesystem = os.path.exists(package_filename) root = os.path.join(os.path.dirname(package_filename), package_path) def loader(path): if path is None: return None, None path = safe_join(root, path) basename = posixpath.basename(path) if is_filesystem: if not os.path.isfile(path): return None, None return basename, self._opener(path) try: data = provider.get_data(path) except IOError: return None, None return basename, lambda: (BytesIO(data), loadtime, len(data)) return loader def get_directory_loader(self, directory): def loader(path): if path is not None: path = safe_join(directory, path) else: path = directory if os.path.isfile(path): return os.path.basename(path), self._opener(path) return None, None return loader def generate_etag(self, mtime, file_size, real_filename): if not isinstance(real_filename, bytes): real_filename = real_filename.encode(get_filesystem_encoding()) return "wzsdm-%d-%s-%s" % ( mktime(mtime.timetuple()), file_size, adler32(real_filename) & 0xFFFFFFFF, ) def __call__(self, environ, start_response): path = get_path_info(environ) if PY2: path = path.encode(get_filesystem_encoding()) file_loader = None for search_path, loader in self.exports: if search_path == path: real_filename, file_loader = loader(None) if file_loader is not None: break if not search_path.endswith("/"): search_path += "/" if path.startswith(search_path): real_filename, file_loader = loader(path[len(search_path) :]) if file_loader is not None: break if file_loader is None or not self.is_allowed(real_filename): return self.app(environ, start_response) 
guessed_type = mimetypes.guess_type(real_filename) mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8") f, mtime, file_size = file_loader() headers = [("Date", http_date())] if self.cache: timeout = self.cache_timeout etag = self.generate_etag(mtime, file_size, real_filename) headers += [ ("Etag", '"%s"' % etag), ("Cache-Control", "max-age=%d, public" % timeout), ] if not is_resource_modified(environ, etag, last_modified=mtime): f.close() start_response("304 Not Modified", headers) return [] headers.append(("Expires", http_date(time() + timeout))) else: headers.append(("Cache-Control", "public")) headers.extend( ( ("Content-Type", mime_type), ("Content-Length", str(file_size)), ("Last-Modified", http_date(mtime)), ) ) start_response("200 OK", headers) return wrap_file(environ, f)
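A minimal sketch (not part of the module) that serves a directory without wrapping a real application, as the ``:param app:`` note above suggests; the ``static`` directory and the file name are assumptions for the example::

    import os
    from werkzeug.exceptions import NotFound
    from werkzeug.middleware.shared_data import SharedDataMiddleware
    from werkzeug.test import Client
    from werkzeug.wrappers import Response

    # NotFound() is itself a WSGI app, so unmatched requests get a 404.
    app = SharedDataMiddleware(NotFound(), {
        "/static": os.path.join(os.path.dirname(__file__), "static")
    })

    client = Client(app, Response)
    resp = client.get("/static/style.css")
    # 200 with the file body if ./static/style.css exists; otherwise the
    # request falls through to the wrapped app and NotFound answers 404.
    print(resp.status)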
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/http_proxy.py
""" Basic HTTP Proxy ================ .. autoclass:: ProxyMiddleware :copyright: 2007 Pallets :license: BSD-3-Clause """ import socket from ..datastructures import EnvironHeaders from ..http import is_hop_by_hop_header from ..urls import url_parse from ..urls import url_quote from ..wsgi import get_input_stream try: from http import client except ImportError: import httplib as client class ProxyMiddleware(object): """Proxy requests under a path to an external server, routing other requests to the app. This middleware can only proxy HTTP requests, as that is the only protocol handled by the WSGI server. Other protocols, such as websocket requests, cannot be proxied at this layer. This should only be used for development, in production a real proxying server should be used. The middleware takes a dict that maps a path prefix to a dict describing the host to be proxied to:: app = ProxyMiddleware(app, { "/static/": { "target": "http://127.0.0.1:5001/", } }) Each host has the following options: ``target``: The target URL to dispatch to. This is required. ``remove_prefix``: Whether to remove the prefix from the URL before dispatching it to the target. The default is ``False``. ``host``: ``"<auto>"`` (default): The host header is automatically rewritten to the URL of the target. ``None``: The host header is unmodified from the client request. Any other value: The host header is overwritten with the value. ``headers``: A dictionary of headers to be sent with the request to the target. The default is ``{}``. ``ssl_context``: A :class:`ssl.SSLContext` defining how to verify requests if the target is HTTPS. The default is ``None``. In the example above, everything under ``"/static/"`` is proxied to the server on port 5001. The host header is rewritten to the target, and the ``"/static/"`` prefix is removed from the URLs. :param app: The WSGI application to wrap. :param targets: Proxy target configurations. See description above. :param chunk_size: Size of chunks to read from input stream and write to target. :param timeout: Seconds before an operation to a target fails. .. 
versionadded:: 0.14 """ def __init__(self, app, targets, chunk_size=2 << 13, timeout=10): def _set_defaults(opts): opts.setdefault("remove_prefix", False) opts.setdefault("host", "<auto>") opts.setdefault("headers", {}) opts.setdefault("ssl_context", None) return opts self.app = app self.targets = dict( ("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items() ) self.chunk_size = chunk_size self.timeout = timeout def proxy_to(self, opts, path, prefix): target = url_parse(opts["target"]) def application(environ, start_response): headers = list(EnvironHeaders(environ).items()) headers[:] = [ (k, v) for k, v in headers if not is_hop_by_hop_header(k) and k.lower() not in ("content-length", "host") ] headers.append(("Connection", "close")) if opts["host"] == "<auto>": headers.append(("Host", target.ascii_host)) elif opts["host"] is None: headers.append(("Host", environ["HTTP_HOST"])) else: headers.append(("Host", opts["host"])) headers.extend(opts["headers"].items()) remote_path = path if opts["remove_prefix"]: remote_path = "%s/%s" % ( target.path.rstrip("/"), remote_path[len(prefix) :].lstrip("/"), ) content_length = environ.get("CONTENT_LENGTH") chunked = False if content_length not in ("", None): headers.append(("Content-Length", content_length)) elif content_length is not None: headers.append(("Transfer-Encoding", "chunked")) chunked = True try: if target.scheme == "http": con = client.HTTPConnection( target.ascii_host, target.port or 80, timeout=self.timeout ) elif target.scheme == "https": con = client.HTTPSConnection( target.ascii_host, target.port or 443, timeout=self.timeout, context=opts["ssl_context"], ) else: raise RuntimeError( "Target scheme must be 'http' or 'https', got '{}'.".format( target.scheme ) ) con.connect() remote_url = url_quote(remote_path) querystring = environ["QUERY_STRING"] if querystring: remote_url = remote_url + "?" + querystring con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True) for k, v in headers: if k.lower() == "connection": v = "close" con.putheader(k, v) con.endheaders() stream = get_input_stream(environ) while 1: data = stream.read(self.chunk_size) if not data: break if chunked: con.send(b"%x\r\n%s\r\n" % (len(data), data)) else: con.send(data) resp = con.getresponse() except socket.error: from ..exceptions import BadGateway return BadGateway()(environ, start_response) start_response( "%d %s" % (resp.status, resp.reason), [ (k.title(), v) for k, v in resp.getheaders() if not is_hop_by_hop_header(k) ], ) def read(): while 1: try: data = resp.read(self.chunk_size) except socket.error: break if not data: break yield data return read() return application def __call__(self, environ, start_response): path = environ["PATH_INFO"] app = self.app for prefix, opts in self.targets.items(): if path.startswith(prefix): app = self.proxy_to(opts, path, prefix) break return app(environ, start_response)
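A minimal configuration sketch (not part of the module); the backend URL and the ``/api/`` prefix are made up, and no request is sent until the app is served::

    from werkzeug.middleware.http_proxy import ProxyMiddleware
    from werkzeug.wrappers import Response

    def local_app(environ, start_response):
        return Response("handled locally")(environ, start_response)

    # Forward /api/... to a local backend with the prefix stripped;
    # everything else stays with local_app.
    app = ProxyMiddleware(local_app, {
        "/api/": {
            "target": "http://127.0.0.1:5001/",
            "remove_prefix": True,
        }
    })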
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/__init__.py
""" Middleware ========== A WSGI middleware is a WSGI application that wraps another application in order to observe or change its behavior. Werkzeug provides some middleware for common use cases. .. toctree:: :maxdepth: 1 proxy_fix shared_data dispatcher http_proxy lint profiler The :doc:`interactive debugger </debug>` is also a middleware that can be applied manually, although it is typically used automatically with the :doc:`development server </serving>`. :copyright: 2007 Pallets :license: BSD-3-Clause """
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/profiler.py
""" Application Profiler ==================== This module provides a middleware that profiles each request with the :mod:`cProfile` module. This can help identify bottlenecks in your code that may be slowing down your application. .. autoclass:: ProfilerMiddleware :copyright: 2007 Pallets :license: BSD-3-Clause """ from __future__ import print_function import os.path import sys import time from pstats import Stats try: from cProfile import Profile except ImportError: from profile import Profile class ProfilerMiddleware(object): """Wrap a WSGI application and profile the execution of each request. Responses are buffered so that timings are more exact. If ``stream`` is given, :class:`pstats.Stats` are written to it after each request. If ``profile_dir`` is given, :mod:`cProfile` data files are saved to that directory, one file per request. The filename can be customized by passing ``filename_format``. If it is a string, it will be formatted using :meth:`str.format` with the following fields available: - ``{method}`` - The request method; GET, POST, etc. - ``{path}`` - The request path or 'root' should one not exist. - ``{elapsed}`` - The elapsed time of the request. - ``{time}`` - The time of the request. If it is a callable, it will be called with the WSGI ``environ`` dict and should return a filename. :param app: The WSGI application to wrap. :param stream: Write stats to this stream. Disable with ``None``. :param sort_by: A tuple of columns to sort stats by. See :meth:`pstats.Stats.sort_stats`. :param restrictions: A tuple of restrictions to filter stats by. See :meth:`pstats.Stats.print_stats`. :param profile_dir: Save profile data files to this directory. :param filename_format: Format string for profile data file names, or a callable returning a name. See explanation above. .. code-block:: python from werkzeug.middleware.profiler import ProfilerMiddleware app = ProfilerMiddleware(app) .. versionchanged:: 0.15 Stats are written even if ``profile_dir`` is given, and can be disable by passing ``stream=None``. .. versionadded:: 0.15 Added ``filename_format``. .. versionadded:: 0.9 Added ``restrictions`` and ``profile_dir``. 
""" def __init__( self, app, stream=sys.stdout, sort_by=("time", "calls"), restrictions=(), profile_dir=None, filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof", ): self._app = app self._stream = stream self._sort_by = sort_by self._restrictions = restrictions self._profile_dir = profile_dir self._filename_format = filename_format def __call__(self, environ, start_response): response_body = [] def catching_start_response(status, headers, exc_info=None): start_response(status, headers, exc_info) return response_body.append def runapp(): app_iter = self._app(environ, catching_start_response) response_body.extend(app_iter) if hasattr(app_iter, "close"): app_iter.close() profile = Profile() start = time.time() profile.runcall(runapp) body = b"".join(response_body) elapsed = time.time() - start if self._profile_dir is not None: if callable(self._filename_format): filename = self._filename_format(environ) else: filename = self._filename_format.format( method=environ["REQUEST_METHOD"], path=( environ.get("PATH_INFO").strip("/").replace("/", ".") or "root" ), elapsed=elapsed * 1000.0, time=time.time(), ) filename = os.path.join(self._profile_dir, filename) profile.dump_stats(filename) if self._stream is not None: stats = Stats(profile, stream=self._stream) stats.sort_stats(*self._sort_by) print("-" * 80, file=self._stream) print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream) stats.print_stats(*self._restrictions) print("-" * 80 + "\n", file=self._stream) return [body]
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/middleware/lint.py
""" WSGI Protocol Linter ==================== This module provides a middleware that performs sanity checks on the behavior of the WSGI server and application. It checks that the :pep:`3333` WSGI spec is properly implemented. It also warns on some common HTTP errors such as non-empty responses for 304 status codes. .. autoclass:: LintMiddleware :copyright: 2007 Pallets :license: BSD-3-Clause """ from warnings import warn from .._compat import implements_iterator from .._compat import PY2 from .._compat import string_types from ..datastructures import Headers from ..http import is_entity_header from ..wsgi import FileWrapper try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse class WSGIWarning(Warning): """Warning class for WSGI warnings.""" class HTTPWarning(Warning): """Warning class for HTTP warnings.""" def check_string(context, obj, stacklevel=3): if type(obj) is not str: warn( "'%s' requires strings, got '%s'" % (context, type(obj).__name__), WSGIWarning, ) class InputStream(object): def __init__(self, stream): self._stream = stream def read(self, *args): if len(args) == 0: warn( "WSGI does not guarantee an EOF marker on the input stream, thus making" " calls to 'wsgi.input.read()' unsafe. Conforming servers may never" " return from this call.", WSGIWarning, stacklevel=2, ) elif len(args) != 1: warn( "Too many parameters passed to 'wsgi.input.read()'.", WSGIWarning, stacklevel=2, ) return self._stream.read(*args) def readline(self, *args): if len(args) == 0: warn( "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use" " 'wsgi.input.read()' instead.", WSGIWarning, stacklevel=2, ) elif len(args) == 1: warn( "'wsgi.input.readline()' was called with a size hint. WSGI does not" " support this, although it's available on all major servers.", WSGIWarning, stacklevel=2, ) else: raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.") return self._stream.readline(*args) def __iter__(self): try: return iter(self._stream) except TypeError: warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2) return iter(()) def close(self): warn("The application closed the input stream!", WSGIWarning, stacklevel=2) self._stream.close() class ErrorStream(object): def __init__(self, stream): self._stream = stream def write(self, s): check_string("wsgi.error.write()", s) self._stream.write(s) def flush(self): self._stream.flush() def writelines(self, seq): for line in seq: self.write(line) def close(self): warn("The application closed the error stream!", WSGIWarning, stacklevel=2) self._stream.close() class GuardedWrite(object): def __init__(self, write, chunks): self._write = write self._chunks = chunks def __call__(self, s): check_string("write()", s) self._write.write(s) self._chunks.append(len(s)) @implements_iterator class GuardedIterator(object): def __init__(self, iterator, headers_set, chunks): self._iterator = iterator if PY2: self._next = iter(iterator).next else: self._next = iter(iterator).__next__ self.closed = False self.headers_set = headers_set self.chunks = chunks def __iter__(self): return self def __next__(self): if self.closed: warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2) rv = self._next() if not self.headers_set: warn( "The application returned before it started the response.", WSGIWarning, stacklevel=2, ) check_string("application iterator items", rv) self.chunks.append(len(rv)) return rv def close(self): self.closed = True if hasattr(self._iterator, "close"): self._iterator.close() if 
self.headers_set: status_code, headers = self.headers_set bytes_sent = sum(self.chunks) content_length = headers.get("content-length", type=int) if status_code == 304: for key, _value in headers: key = key.lower() if key not in ("expires", "content-location") and is_entity_header( key ): warn( "Entity header %r found in 304 response." % key, HTTPWarning ) if bytes_sent: warn("304 responses must not have a body.", HTTPWarning) elif 100 <= status_code < 200 or status_code == 204: if content_length != 0: warn( "%r responses must have an empty content length." % status_code, HTTPWarning, ) if bytes_sent: warn( "%r responses must not have a body." % status_code, HTTPWarning ) elif content_length is not None and content_length != bytes_sent: warn( "Content-Length and the number of bytes sent to the client do not" " match.", WSGIWarning, ) def __del__(self): if not self.closed: try: warn( "Iterator was garbage collected before it was closed.", WSGIWarning ) except Exception: pass class LintMiddleware(object): """Warns about common errors in the WSGI and HTTP behavior of the server and wrapped application. Some of the issues it checks are: - invalid status codes - non-bytestrings sent to the WSGI server - strings returned from the WSGI application - non-empty conditional responses - unquoted etags - relative URLs in the Location header - unsafe calls to wsgi.input - unclosed iterators Error information is emitted using the :mod:`warnings` module. :param app: The WSGI application to wrap. .. code-block:: python from werkzeug.middleware.lint import LintMiddleware app = LintMiddleware(app) """ def __init__(self, app): self.app = app def check_environ(self, environ): if type(environ) is not dict: warn( "WSGI environment is not a standard Python dict.", WSGIWarning, stacklevel=4, ) for key in ( "REQUEST_METHOD", "SERVER_NAME", "SERVER_PORT", "wsgi.version", "wsgi.input", "wsgi.errors", "wsgi.multithread", "wsgi.multiprocess", "wsgi.run_once", ): if key not in environ: warn( "Required environment key %r not found" % key, WSGIWarning, stacklevel=3, ) if environ["wsgi.version"] != (1, 0): warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3) script_name = environ.get("SCRIPT_NAME", "") path_info = environ.get("PATH_INFO", "") if script_name and script_name[0] != "/": warn( "'SCRIPT_NAME' does not start with a slash: %r" % script_name, WSGIWarning, stacklevel=3, ) if path_info and path_info[0] != "/": warn( "'PATH_INFO' does not start with a slash: %r" % path_info, WSGIWarning, stacklevel=3, ) def check_start_response(self, status, headers, exc_info): check_string("status", status) status_code = status.split(None, 1)[0] if len(status_code) != 3 or not status_code.isdigit(): warn(WSGIWarning("Status code must be three digits"), stacklevel=3) if len(status) < 4 or status[3] != " ": warn( WSGIWarning( "Invalid value for status %r.
Valid " "status strings are three digits, a space " "and a status explanation" ), stacklevel=3, ) status_code = int(status_code) if status_code < 100: warn(WSGIWarning("status code < 100 detected"), stacklevel=3) if type(headers) is not list: warn(WSGIWarning("header list is not a list"), stacklevel=3) for item in headers: if type(item) is not tuple or len(item) != 2: warn(WSGIWarning("Headers must tuple 2-item tuples"), stacklevel=3) name, value = item if type(name) is not str or type(value) is not str: warn(WSGIWarning("header items must be strings"), stacklevel=3) if name.lower() == "status": warn( WSGIWarning( "The status header is not supported due to " "conflicts with the CGI spec." ), stacklevel=3, ) if exc_info is not None and not isinstance(exc_info, tuple): warn(WSGIWarning("invalid value for exc_info"), stacklevel=3) headers = Headers(headers) self.check_headers(headers) return status_code, headers def check_headers(self, headers): etag = headers.get("etag") if etag is not None: if etag.startswith(("W/", "w/")): if etag.startswith("w/"): warn( HTTPWarning("weak etag indicator should be upcase."), stacklevel=4, ) etag = etag[2:] if not (etag[:1] == etag[-1:] == '"'): warn(HTTPWarning("unquoted etag emitted."), stacklevel=4) location = headers.get("location") if location is not None: if not urlparse(location).netloc: warn( HTTPWarning("absolute URLs required for location header"), stacklevel=4, ) def check_iterator(self, app_iter): if isinstance(app_iter, string_types): warn( "The application returned astring. The response will send one character" " at a time to the client, which will kill performance. Return a list" " or iterable instead.", WSGIWarning, stacklevel=3, ) def __call__(self, *args, **kwargs): if len(args) != 2: warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2) if kwargs: warn( "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2 ) environ, start_response = args self.check_environ(environ) environ["wsgi.input"] = InputStream(environ["wsgi.input"]) environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"]) # Hook our own file wrapper in so that applications will always # iterate to the end and we can check the content length. environ["wsgi.file_wrapper"] = FileWrapper headers_set = [] chunks = [] def checking_start_response(*args, **kwargs): if len(args) not in (2, 3): warn( "Invalid number of arguments: %s, expected 2 or 3." % len(args), WSGIWarning, stacklevel=2, ) if kwargs: warn("'start_response' does not take keyword arguments.", WSGIWarning) status, headers = args[:2] if len(args) == 3: exc_info = args[2] else: exc_info = None headers_set[:] = self.check_start_response(status, headers, exc_info) return GuardedWrite(start_response(status, headers, exc_info), chunks) app_iter = self.app(environ, checking_start_response) self.check_iterator(app_iter) return GuardedIterator(app_iter, headers_set, chunks)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/wrappers/auth.py
from ..http import parse_authorization_header from ..http import parse_www_authenticate_header from ..utils import cached_property class AuthorizationMixin(object): """Adds an :attr:`authorization` property that represents the parsed value of the `Authorization` header as an :class:`~werkzeug.datastructures.Authorization` object. """ @cached_property def authorization(self): """The `Authorization` object in parsed form.""" header = self.environ.get("HTTP_AUTHORIZATION") return parse_authorization_header(header) class WWWAuthenticateMixin(object): """Adds a :attr:`www_authenticate` property to a response object.""" @property def www_authenticate(self): """The `WWW-Authenticate` header in a parsed form.""" def on_update(www_auth): if not www_auth and "www-authenticate" in self.headers: del self.headers["www-authenticate"] elif www_auth: self.headers["WWW-Authenticate"] = www_auth.to_header() header = self.headers.get("www-authenticate") return parse_www_authenticate_header(header, on_update)
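A minimal sketch (not part of the module) of the request-side mixin; the credentials are made up (``dXNlcjpwYXNz`` is base64 for ``user:pass``)::

    from werkzeug.test import create_environ
    from werkzeug.wrappers import BaseRequest
    from werkzeug.wrappers.auth import AuthorizationMixin

    class Request(AuthorizationMixin, BaseRequest):
        pass

    environ = create_environ(
        "/", headers={"Authorization": "Basic dXNlcjpwYXNz"})
    req = Request(environ)
    print(req.authorization.username, req.authorization.password)
    # user pass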
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/werkzeug/wrappers/user_agent.py
from ..useragents import UserAgent from ..utils import cached_property class UserAgentMixin(object): """Adds a `user_agent` attribute to the request object which contains the parsed user agent of the browser that triggered the request as a :class:`~werkzeug.useragents.UserAgent` object. """ @cached_property def user_agent(self): """The current user agent.""" return UserAgent(self.environ)
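A minimal sketch (not part of the module); the user agent string is invented, and the parsed fields depend on the built-in patterns in ``werkzeug.useragents``::

    from werkzeug.test import create_environ
    from werkzeug.useragents import UserAgent

    environ = create_environ("/", headers={
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
                      " (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"})
    ua = UserAgent(environ)
    print(ua.browser, ua.version, ua.platform)  # chrome 80.0.3987.132 linux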