content
stringlengths 1
103k
⌀ | path
stringlengths 8
216
| filename
stringlengths 2
179
| language
stringclasses 15
values | size_bytes
int64 2
189k
| quality_score
float64 0.5
0.95
| complexity
float64 0
1
| documentation_ratio
float64 0
1
| repository
stringclasses 5
values | stars
int64 0
1k
| created_date
stringdate 2023-07-10 19:21:08
2025-07-09 19:11:45
| license
stringclasses 4
values | is_test
bool 2
classes | file_hash
stringlengths 32
32
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from jedi.inference.value import ModuleValue
from jedi.inference.context import ModuleContext


class DocstringModule(ModuleValue):
    """
    A module value used for code parsed out of docstrings.

    Wraps a normal ``ModuleValue`` but remembers the context of the module
    the docstring was found in, so names from that module stay visible.
    """
    def __init__(self, in_module_context, **kwargs):
        # All regular ModuleValue arguments are forwarded unchanged; only
        # the originating module context is added on top.
        super().__init__(**kwargs)
        self._in_module_context = in_module_context

    def _as_context(self):
        # Pair this module with the context it was defined in.
        return DocstringModuleContext(self, self._in_module_context)


class DocstringModuleContext(ModuleContext):
    """
    Context for a :class:`DocstringModule`.

    Name lookups first search the docstring module itself and then fall
    back to the module the docstring appeared in.
    """
    def __init__(self, module_value, in_module_context):
        super().__init__(module_value)
        self._in_module_context = in_module_context

    def get_filters(self, origin_scope=None, until_position=None):
        # Own filters first (position-limited), then the enclosing module's
        # filters so its names can be resolved from within the docstring.
        # NOTE(review): origin_scope is accepted but not forwarded —
        # presumably irrelevant for docstring modules; confirm upstream.
        yield from super().get_filters(until_position=until_position)
        yield from self._in_module_context.get_filters()
|
.venv\Lib\site-packages\jedi\inference\docstring_utils.py
|
docstring_utils.py
|
Python
| 759 | 0.85 | 0.285714 | 0 |
vue-tools
| 215 |
2023-11-29T02:08:26.335879
|
Apache-2.0
| false |
99be28b9b30dfd9d92ccbe94726aa7f3
|
"""\nOne of the really important features of |jedi| is to have an option to\nunderstand code like this::\n\n def foo(bar):\n bar. # completion here\n foo(1)\n\nThere's no doubt wheter bar is an ``int`` or not, but if there's also a call\nlike ``foo('str')``, what would happen? Well, we'll just show both. Because\nthat's what a human would expect.\n\nIt works as follows:\n\n- |Jedi| sees a param\n- search for function calls named ``foo``\n- execute these calls and check the input.\n"""\n\nfrom jedi import settings\nfrom jedi import debug\nfrom jedi.parser_utils import get_parent_scope\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.arguments import TreeArguments\nfrom jedi.inference.param import get_executed_param_names\nfrom jedi.inference.helpers import is_stdlib_path\nfrom jedi.inference.utils import to_list\nfrom jedi.inference.value import instance\nfrom jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.inference.references import get_module_contexts_containing_name\nfrom jedi.inference import recursion\n\n\nMAX_PARAM_SEARCHES = 20\n\n\ndef _avoid_recursions(func):\n def wrapper(function_value, param_index):\n inf = function_value.inference_state\n with recursion.execution_allowed(inf, function_value.tree_node) as allowed:\n # We need to catch recursions that may occur, because an\n # anonymous functions can create an anonymous parameter that is\n # more or less self referencing.\n if allowed:\n inf.dynamic_params_depth += 1\n try:\n return func(function_value, param_index)\n finally:\n inf.dynamic_params_depth -= 1\n return NO_VALUES\n return wrapper\n\n\n@debug.increase_indent\n@_avoid_recursions\ndef dynamic_param_lookup(function_value, param_index):\n """\n A dynamic search for param values. If you try to complete a type:\n\n >>> def func(foo):\n ... foo\n >>> func(1)\n >>> func("")\n\n It is not known what the type ``foo`` without analysing the whole code. 
You\n have to look for all calls to ``func`` to find out what ``foo`` possibly\n is.\n """\n if not function_value.inference_state.do_dynamic_params_search:\n return NO_VALUES\n\n funcdef = function_value.tree_node\n\n path = function_value.get_root_context().py__file__()\n if path is not None and is_stdlib_path(path):\n # We don't want to search for references in the stdlib. Usually people\n # don't work with it (except if you are a core maintainer, sorry).\n # This makes everything slower. Just disable it and run the tests,\n # you will see the slowdown, especially in 3.6.\n return NO_VALUES\n\n if funcdef.type == 'lambdef':\n string_name = _get_lambda_name(funcdef)\n if string_name is None:\n return NO_VALUES\n else:\n string_name = funcdef.name.value\n debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')\n\n module_context = function_value.get_root_context()\n arguments_list = _search_function_arguments(module_context, funcdef, string_name)\n values = ValueSet.from_sets(\n get_executed_param_names(\n function_value, arguments\n )[param_index].infer()\n for arguments in arguments_list\n )\n debug.dbg('Dynamic param result finished', color='MAGENTA')\n return values\n\n\n@inference_state_method_cache(default=None)\n@to_list\ndef _search_function_arguments(module_context, funcdef, string_name):\n """\n Returns a list of param names.\n """\n compare_node = funcdef\n if string_name == '__init__':\n cls = get_parent_scope(funcdef)\n if cls.type == 'classdef':\n string_name = cls.name.value\n compare_node = cls\n\n found_arguments = False\n i = 0\n inference_state = module_context.inference_state\n\n if settings.dynamic_params_for_other_modules:\n module_contexts = get_module_contexts_containing_name(\n inference_state, [module_context], string_name,\n # Limit the amounts of files to be opened massively.\n limit_reduction=5,\n )\n else:\n module_contexts = [module_context]\n\n for for_mod_context in module_contexts:\n for name, trailer in 
_get_potential_nodes(for_mod_context, string_name):\n i += 1\n\n # This is a simple way to stop Jedi's dynamic param recursion\n # from going wild: The deeper Jedi's in the recursion, the less\n # code should be inferred.\n if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES:\n return\n\n random_context = for_mod_context.create_context(name)\n for arguments in _check_name_for_execution(\n inference_state, random_context, compare_node, name, trailer):\n found_arguments = True\n yield arguments\n\n # If there are results after processing a module, we're probably\n # good to process. This is a speed optimization.\n if found_arguments:\n return\n\n\ndef _get_lambda_name(node):\n stmt = node.parent\n if stmt.type == 'expr_stmt':\n first_operator = next(stmt.yield_operators(), None)\n if first_operator == '=':\n first = stmt.children[0]\n if first.type == 'name':\n return first.value\n\n return None\n\n\ndef _get_potential_nodes(module_value, func_string_name):\n try:\n names = module_value.tree_node.get_used_names()[func_string_name]\n except KeyError:\n return\n\n for name in names:\n bracket = name.get_next_leaf()\n trailer = bracket.parent\n if trailer.type == 'trailer' and bracket == '(':\n yield name, trailer\n\n\ndef _check_name_for_execution(inference_state, context, compare_node, name, trailer):\n from jedi.inference.value.function import BaseFunctionExecutionContext\n\n def create_args(value):\n arglist = trailer.children[1]\n if arglist == ')':\n arglist = None\n args = TreeArguments(inference_state, context, arglist, trailer)\n from jedi.inference.value.instance import InstanceArguments\n if value.tree_node.type == 'classdef':\n created_instance = instance.TreeInstance(\n inference_state,\n value.parent_context,\n value,\n args\n )\n return InstanceArguments(created_instance, args)\n else:\n if value.is_bound_method():\n args = InstanceArguments(value.instance, args)\n return args\n\n for value in inference_state.infer(context, name):\n 
value_node = value.tree_node\n if compare_node == value_node:\n yield create_args(value)\n elif isinstance(value.parent_context, BaseFunctionExecutionContext) \\n and compare_node.type == 'funcdef':\n # Here we're trying to find decorators by checking the first\n # parameter. It's not very generic though. Should find a better\n # solution that also applies to nested decorators.\n param_names = value.parent_context.get_param_names()\n if len(param_names) != 1:\n continue\n values = param_names[0].infer()\n if [v.tree_node for v in values] == [compare_node]:\n # Found a decorator.\n module_context = context.get_root_context()\n execution_context = value.as_context(create_args(value))\n potential_nodes = _get_potential_nodes(module_context, param_names[0].string_name)\n for name, trailer in potential_nodes:\n if value_node.start_pos < name.start_pos < value_node.end_pos:\n random_context = execution_context.create_context(name)\n yield from _check_name_for_execution(\n inference_state,\n random_context,\n compare_node,\n name,\n trailer\n )\n
|
.venv\Lib\site-packages\jedi\inference\dynamic_params.py
|
dynamic_params.py
|
Python
| 8,154 | 0.95 | 0.227679 | 0.089947 |
python-kit
| 436 |
2023-09-01T02:35:11.871616
|
MIT
| false |
b8c3047103b1e473acb81954b5c91b34
|
"""\nSearching for names with given scope and name. This is very central in Jedi and\nPython. The name resolution is quite complicated with descripter,\n``__getattribute__``, ``__getattr__``, ``global``, etc.\n\nIf you want to understand name resolution, please read the first few chapters\nin http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.\n\nFlow checks\n+++++++++++\n\nFlow checks are not really mature. There's only a check for ``isinstance``. It\nwould check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.\nUnfortunately every other thing is being ignored (e.g. a == '' would be easy to\ncheck for -> a is a string). There's big potential in these checks.\n"""\n\nfrom parso.tree import search_ancestor\nfrom parso.python.tree import Name\n\nfrom jedi import settings\nfrom jedi.inference.arguments import TreeArguments\nfrom jedi.inference.value import iterable\nfrom jedi.inference.base_value import NO_VALUES\nfrom jedi.parser_utils import is_scope\n\n\ndef filter_name(filters, name_or_str):\n """\n Searches names that are defined in a scope (the different\n ``filters``), until a name fits.\n """\n string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str\n names = []\n for filter in filters:\n names = filter.get(string_name)\n if names:\n break\n\n return list(_remove_del_stmt(names))\n\n\ndef _remove_del_stmt(names):\n # Catch del statements and remove them from results.\n for name in names:\n if name.tree_name is not None:\n definition = name.tree_name.get_definition()\n if definition is not None and definition.type == 'del_stmt':\n continue\n yield name\n\n\ndef check_flow_information(value, flow, search_name, pos):\n """ Try to find out the type of a variable just with the information that\n is given by the flows: e.g. It is also responsible for assert checks.::\n\n if isinstance(k, str):\n k. 
# <- completion here\n\n ensures that `k` is a string.\n """\n if not settings.dynamic_flow_information:\n return None\n\n result = None\n if is_scope(flow):\n # Check for asserts.\n module_node = flow.get_root_node()\n try:\n names = module_node.get_used_names()[search_name.value]\n except KeyError:\n return None\n names = reversed([\n n for n in names\n if flow.start_pos <= n.start_pos < (pos or flow.end_pos)\n ])\n\n for name in names:\n ass = search_ancestor(name, 'assert_stmt')\n if ass is not None:\n result = _check_isinstance_type(value, ass.assertion, search_name)\n if result is not None:\n return result\n\n if flow.type in ('if_stmt', 'while_stmt'):\n potential_ifs = [c for c in flow.children[1::4] if c != ':']\n for if_test in reversed(potential_ifs):\n if search_name.start_pos > if_test.end_pos:\n return _check_isinstance_type(value, if_test, search_name)\n return result\n\n\ndef _get_isinstance_trailer_arglist(node):\n if node.type in ('power', 'atom_expr') and len(node.children) == 2:\n # This might be removed if we analyze and, etc\n first, trailer = node.children\n if first.type == 'name' and first.value == 'isinstance' \\n and trailer.type == 'trailer' and trailer.children[0] == '(':\n return trailer\n return None\n\n\ndef _check_isinstance_type(value, node, search_name):\n lazy_cls = None\n trailer = _get_isinstance_trailer_arglist(node)\n if trailer is not None and len(trailer.children) == 3:\n arglist = trailer.children[1]\n args = TreeArguments(value.inference_state, value, arglist, trailer)\n param_list = list(args.unpack())\n # Disallow keyword arguments\n if len(param_list) == 2 and len(arglist.children) == 3:\n (key1, _), (key2, lazy_value_cls) = param_list\n if key1 is None and key2 is None:\n call = _get_call_string(search_name)\n is_instance_call = _get_call_string(arglist.children[0])\n # Do a simple get_code comparison of the strings . 
They should\n # just have the same code, and everything will be all right.\n # There are ways that this is not correct, if some stuff is\n # redefined in between. However here we don't care, because\n # it's a heuristic that works pretty well.\n if call == is_instance_call:\n lazy_cls = lazy_value_cls\n if lazy_cls is None:\n return None\n\n value_set = NO_VALUES\n for cls_or_tup in lazy_cls.infer():\n if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':\n for lazy_value in cls_or_tup.py__iter__():\n value_set |= lazy_value.infer().execute_with_values()\n else:\n value_set |= cls_or_tup.execute_with_values()\n return value_set\n\n\ndef _get_call_string(node):\n if node.parent.type == 'atom_expr':\n return _get_call_string(node.parent)\n\n code = ''\n leaf = node.get_first_leaf()\n end = node.get_last_leaf().end_pos\n while leaf.start_pos < end:\n code += leaf.value\n leaf = leaf.get_next_leaf()\n return code\n
|
.venv\Lib\site-packages\jedi\inference\finder.py
|
finder.py
|
Python
| 5,326 | 0.95 | 0.315068 | 0.07438 |
node-utils
| 591 |
2023-07-17T20:25:32.228558
|
Apache-2.0
| false |
a1bc08adeca45a79d45e1f58dc5929b5
|
from typing import Dict, Optional

from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
from jedi.inference.recursion import execution_allowed
from jedi.inference.helpers import is_big_annoying_library


class Status:
    """
    Three-valued reachability status (reachable / unreachable / unsure).

    Instances register themselves in ``lookup_table`` keyed by their bool
    value, so a ``py__bool__()`` result maps straight back to a Status.
    """
    lookup_table: Dict[Optional[bool], 'Status'] = {}

    def __init__(self, value: Optional[bool], name: str) -> None:
        self._value = value
        self._name = name
        Status.lookup_table[value] = self

    def invert(self):
        # Logical negation; UNSURE stays UNSURE.
        if self is REACHABLE:
            return UNREACHABLE
        elif self is UNREACHABLE:
            return REACHABLE
        else:
            return UNSURE

    def __and__(self, other):
        # Three-valued AND: any UNSURE poisons the result.
        if UNSURE in (self, other):
            return UNSURE
        else:
            return REACHABLE if self._value and other._value else UNREACHABLE

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self._name)


REACHABLE = Status(True, 'reachable')
UNREACHABLE = Status(False, 'unreachable')
UNSURE = Status(None, 'unsure')


def _get_flow_scopes(node):
    """Yield the enclosing flow nodes of ``node``, innermost first, stopping
    at the first real scope (function/class/module)."""
    while True:
        node = get_parent_scope(node, include_flows=True)
        if node is None or is_scope(node):
            return
        yield node


def reachability_check(context, value_scope, node, origin_scope=None):
    """
    Decide whether ``node`` is reachable from ``origin_scope``.

    First compares the if/try branches both positions sit in; then falls
    back to evaluating the flow tests via :func:`_break_check`.
    """
    if is_big_annoying_library(context) \
            or not context.inference_state.flow_analysis_enabled:
        return UNSURE

    first_flow_scope = get_parent_scope(node, include_flows=True)
    if origin_scope is not None:
        origin_flow_scopes = list(_get_flow_scopes(origin_scope))
        node_flow_scopes = list(_get_flow_scopes(node))

        branch_matches = True
        for flow_scope in origin_flow_scopes:
            if flow_scope in node_flow_scopes:
                # Shared flow: compare which branch keyword each side is in.
                node_keyword = get_flow_branch_keyword(flow_scope, node)
                origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
                branch_matches = node_keyword == origin_keyword
                if flow_scope.type == 'if_stmt':
                    if not branch_matches:
                        return UNREACHABLE
                elif flow_scope.type == 'try_stmt':
                    if not branch_matches and origin_keyword == 'else' \
                            and node_keyword == 'except':
                        return UNREACHABLE
                if branch_matches:
                    break

        # Direct parents get resolved, we filter scopes that are separate
        # branches. This makes sense for autocompletion and static analysis.
        # For actual Python it doesn't matter, because we're talking about
        # potentially unreachable code.
        # e.g. `if 0:` would cause all name lookup within the flow make
        # unaccessible. This is not a "problem" in Python, because the code is
        # never called. In Jedi though, we still want to infer types.
        while origin_scope is not None:
            if first_flow_scope == origin_scope and branch_matches:
                return REACHABLE
            origin_scope = origin_scope.parent

    return _break_check(context, value_scope, first_flow_scope, node)


def _break_check(context, value_scope, flow_scope, node):
    """
    Evaluate the flow tests enclosing ``node`` (innermost first, recursing
    outwards until ``value_scope`` is reached) and AND the statuses together.
    """
    reachable = REACHABLE
    if flow_scope.type == 'if_stmt':
        if flow_scope.is_node_after_else(node):
            # In the else branch: reachable only if every test can be false.
            for check_node in flow_scope.get_test_nodes():
                reachable = _check_if(context, check_node)
                if reachable in (REACHABLE, UNSURE):
                    break
            reachable = reachable.invert()
        else:
            flow_node = flow_scope.get_corresponding_test_node(node)
            if flow_node is not None:
                reachable = _check_if(context, flow_node)
    elif flow_scope.type in ('try_stmt', 'while_stmt'):
        return UNSURE

    # Only reachable branches need to be examined further.
    if reachable in (UNREACHABLE, UNSURE):
        return reachable

    if value_scope != flow_scope and value_scope != flow_scope.parent:
        flow_scope = get_parent_scope(flow_scope, include_flows=True)
        return reachable & _break_check(context, value_scope, flow_scope, node)
    else:
        return reachable


def _check_if(context, node):
    """Infer the truthiness of a single flow test; UNSURE on ambiguity or
    when recursion protection disallows evaluation."""
    with execution_allowed(context.inference_state, node) as allowed:
        if not allowed:
            return UNSURE

        types = context.infer_node(node)
        values = set(x.py__bool__() for x in types)
        if len(values) == 1:
            # All inferred values agree on a truthiness -> map it to a Status.
            return Status.lookup_table[values.pop()]
        else:
            return UNSURE
|
.venv\Lib\site-packages\jedi\inference\flow_analysis.py
|
flow_analysis.py
|
Python
| 4,583 | 0.95 | 0.288 | 0.079208 |
python-kit
| 667 |
2025-06-22T14:15:16.340986
|
Apache-2.0
| false |
dc0668ed2a95e3fd80656cdf8ca0900b
|
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager

from parso.python import tree


def is_stdlib_path(path):
    """
    Heuristically decide whether ``path`` (a pathlib.Path) points into the
    Python standard library.
    """
    # Python standard library paths look like this:
    # /usr/lib/python3.9/...
    # TODO The implementation below is probably incorrect and not complete.
    parts = path.parts
    if 'dist-packages' in parts or 'site-packages' in parts:
        return False

    base_path = os.path.join(sys.prefix, 'lib', 'python')
    # BUGFIX: the dot must be escaped and components may have several digits
    # (e.g. "3.10"); the previous pattern r'\d.\d' let '.' match any
    # character, so paths like ".../lib/python3x9" were misclassified.
    return bool(re.match(re.escape(base_path) + r'\d+\.\d+', str(path)))


def deep_ast_copy(obj):
    """
    Much, much faster than copy.deepcopy, but just for parser tree nodes.
    """
    # Shallow-copy the node itself, then rebuild its children recursively
    # so parent pointers end up inside the copied tree.
    new_obj = copy.copy(obj)

    # Copy children
    new_children = []
    for child in obj.children:
        if isinstance(child, tree.Leaf):
            new_child = copy.copy(child)
            new_child.parent = new_obj
        else:
            new_child = deep_ast_copy(child)
            new_child.parent = new_obj
        new_children.append(new_child)
    new_obj.children = new_children

    return new_obj


def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
    """
    Creates a "call" node that consist of all ``trailer`` and ``power``
    objects.  E.g. if you call it with ``append``::

        list([]).append(3) or None

    You would get a node with the content ``list([]).append`` back.

    This generates a copy of the original ast node.

    If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.

    We use this function for two purposes. Given an expression ``bar.foo``,
    we may want to
      - infer the type of ``foo`` to offer completions after foo
      - infer the type of ``bar`` to be able to jump to the definition of foo
    The option ``cut_own_trailer`` must be set to true for the second purpose.
    """
    trailer = leaf.parent
    if trailer.type == 'fstring':
        from jedi.inference import compiled
        return compiled.get_string_value_set(context.inference_state)

    # The leaf may not be the last or first child, because there exist three
    # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
    # we should not match anything more than x.
    if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
        if leaf == ':':
            # Basically happens with foo[:] when the cursor is on the colon
            from jedi.inference.base_value import NO_VALUES
            return NO_VALUES
        if trailer.type == 'atom':
            return context.infer_node(trailer)
        return context.infer_node(leaf)

    power = trailer.parent
    index = power.children.index(trailer)
    if cut_own_trailer:
        cut = index
    else:
        cut = index + 1

    if power.type == 'error_node':
        # Incomplete code: walk backwards to find the base of the trailer run.
        start = index
        while True:
            start -= 1
            base = power.children[start]
            if base.type != 'trailer':
                break
        trailers = power.children[start + 1:cut]
    else:
        base = power.children[0]
        trailers = power.children[1:cut]

    if base == 'await':
        # Skip the `await` keyword; the awaited expression is the real base.
        base = trailers[0]
        trailers = trailers[1:]

    values = context.infer_node(base)
    from jedi.inference.syntax_tree import infer_trailer
    for trailer in trailers:
        values = infer_trailer(context, values, trailer)
    return values


def get_names_of_node(node):
    """Recursively collect all ``name`` leaves below ``node``."""
    try:
        children = node.children
    except AttributeError:
        if node.type == 'name':
            return [node]
        else:
            return []
    else:
        return list(chain.from_iterable(get_names_of_node(c) for c in children))


def is_string(value):
    """True if ``value`` is a compiled value wrapping a Python str."""
    return value.is_compiled() and isinstance(value.get_safe_value(default=None), str)


def is_literal(value):
    """True if ``value`` wraps a number or string literal."""
    return is_number(value) or is_string(value)


def _get_safe_value_or_none(value, accept):
    # Returns the wrapped Python object only if it is an instance of
    # ``accept``; otherwise None (implicitly).
    value = value.get_safe_value(default=None)
    if isinstance(value, accept):
        return value


def get_int_or_none(value):
    """Return the wrapped int, or None if ``value`` is not an int."""
    return _get_safe_value_or_none(value, int)


def get_str_or_none(value):
    """Return the wrapped str, or None if ``value`` is not a str."""
    return _get_safe_value_or_none(value, str)


def is_number(value):
    """True if ``value`` wraps an int or float."""
    return _get_safe_value_or_none(value, (int, float)) is not None


class SimpleGetItemNotFound(Exception):
    """Raised when a simple ``__getitem__`` lookup cannot be resolved."""
    pass


@contextmanager
def reraise_getitem_errors(*exception_classes):
    """Translate the given exception classes into SimpleGetItemNotFound."""
    try:
        yield
    except exception_classes as e:
        raise SimpleGetItemNotFound(e)


def parse_dotted_names(nodes, is_import_from, until_node=None):
    """
    Parse the name parts of an import statement.

    Returns ``(level, names)`` where ``level`` counts leading relative-import
    dots and ``names`` are the Name nodes up to (and including) ``until_node``
    if given.
    """
    level = 0
    names = []
    for node in nodes[1:]:
        if node in ('.', '...'):
            if not names:
                # Leading dots set the relative-import level ('...' counts 3).
                level += len(node.value)
        elif node.type == 'dotted_name':
            # children[::2] skips the '.' separator leaves.
            for n in node.children[::2]:
                names.append(n)
                if n is until_node:
                    break
            else:
                continue
            break
        elif node.type == 'name':
            names.append(node)
            if node is until_node:
                break
        elif node == ',':
            if not is_import_from:
                # `import a, b`: each comma starts a fresh dotted name.
                names = []
        else:
            # Here if the keyword `import` comes along it stops checking
            # for names.
            break
    return level, names


def values_from_qualified_names(inference_state, *names):
    """Import the module ``names[:-1]`` and fetch attribute ``names[-1]``."""
    return inference_state.import_module(names[:-1]).py__getattribute__(names[-1])


def is_big_annoying_library(context):
    """True if the context's root module belongs to a library known to make
    dynamic analysis very slow."""
    string_names = context.get_root_context().string_names
    if string_names is None:
        return False

    # Especially pandas and tensorflow are huge complicated Python libraries
    # that get even slower than they already are when Jedi tries to understand
    # dynamic features like decorators, ifs and other stuff.
    return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib')
|
.venv\Lib\site-packages\jedi\inference\helpers.py
|
helpers.py
|
Python
| 5,943 | 0.95 | 0.237624 | 0.089172 |
node-utils
| 171 |
2024-04-30T00:31:30.934134
|
Apache-2.0
| false |
f9b425a1468bd8da52067139d455fcf9
|
"""\n:mod:`jedi.inference.imports` is here to resolve import statements and return\nthe modules/classes/functions/whatever, which they stand for. However there's\nnot any actual importing done. This module is about finding modules in the\nfilesystem. This can be quite tricky sometimes, because Python imports are not\nalways that simple.\n\nThis module also supports import autocompletion, which means to complete\nstatements like ``from datetim`` (cursor at the end would return ``datetime``).\n"""\nimport os\nfrom pathlib import Path\n\nfrom parso.python import tree\nfrom parso.tree import search_ancestor\n\nfrom jedi import debug\nfrom jedi import settings\nfrom jedi.file_io import FolderIO\nfrom jedi.parser_utils import get_cached_code_lines\nfrom jedi.inference import sys_path\nfrom jedi.inference import helpers\nfrom jedi.inference import compiled\nfrom jedi.inference import analysis\nfrom jedi.inference.utils import unite\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.names import ImportName, SubModuleName\nfrom jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.inference.gradual.typeshed import import_module_decorator, \\n create_stub_module, parse_stub_module\nfrom jedi.inference.compiled.subprocess.functions import ImplicitNSInfo\nfrom jedi.plugins import plugin_manager\n\n\nclass ModuleCache:\n def __init__(self):\n self._name_cache = {}\n\n def add(self, string_names, value_set):\n if string_names is not None:\n self._name_cache[string_names] = value_set\n\n def get(self, string_names):\n return self._name_cache.get(string_names)\n\n\n# This memoization is needed, because otherwise we will infinitely loop on\n# certain imports.\n@inference_state_method_cache(default=NO_VALUES)\ndef infer_import(context, tree_name):\n module_context = context.get_root_context()\n from_import_name, import_path, level, values = \\n _prepare_infer_import(module_context, tree_name)\n if values:\n\n if from_import_name is not 
None:\n values = values.py__getattribute__(\n from_import_name,\n name_context=context,\n analysis_errors=False\n )\n\n if not values:\n path = import_path + (from_import_name,)\n importer = Importer(context.inference_state, path, module_context, level)\n values = importer.follow()\n debug.dbg('after import: %s', values)\n return values\n\n\n@inference_state_method_cache(default=[])\ndef goto_import(context, tree_name):\n module_context = context.get_root_context()\n from_import_name, import_path, level, values = \\n _prepare_infer_import(module_context, tree_name)\n if not values:\n return []\n\n if from_import_name is not None:\n names = unite([\n c.goto(\n from_import_name,\n name_context=context,\n analysis_errors=False\n ) for c in values\n ])\n # Avoid recursion on the same names.\n if names and not any(n.tree_name is tree_name for n in names):\n return names\n\n path = import_path + (from_import_name,)\n importer = Importer(context.inference_state, path, module_context, level)\n values = importer.follow()\n return set(s.name for s in values)\n\n\ndef _prepare_infer_import(module_context, tree_name):\n import_node = search_ancestor(tree_name, 'import_name', 'import_from')\n import_path = import_node.get_path_for_name(tree_name)\n from_import_name = None\n try:\n from_names = import_node.get_from_names()\n except AttributeError:\n # Is an import_name\n pass\n else:\n if len(from_names) + 1 == len(import_path):\n # We have to fetch the from_names part first and then check\n # if from_names exists in the modules.\n from_import_name = import_path[-1]\n import_path = from_names\n\n importer = Importer(module_context.inference_state, tuple(import_path),\n module_context, import_node.level)\n\n return from_import_name, tuple(import_path), import_node.level, importer.follow()\n\n\ndef _add_error(value, name, message):\n if hasattr(name, 'parent') and value is not None:\n analysis.add(value, 'import-error', name, message)\n else:\n debug.warning('ImportError without 
origin: ' + message)\n\n\ndef _level_to_base_import_path(project_path, directory, level):\n """\n In case the level is outside of the currently known package (something like\n import .....foo), we can still try our best to help the user for\n completions.\n """\n for i in range(level - 1):\n old = directory\n directory = os.path.dirname(directory)\n if old == directory:\n return None, None\n\n d = directory\n level_import_paths = []\n # Now that we are on the level that the user wants to be, calculate the\n # import path for it.\n while True:\n if d == project_path:\n return level_import_paths, d\n dir_name = os.path.basename(d)\n if dir_name:\n level_import_paths.insert(0, dir_name)\n d = os.path.dirname(d)\n else:\n return None, directory\n\n\nclass Importer:\n def __init__(self, inference_state, import_path, module_context, level=0):\n """\n An implementation similar to ``__import__``. Use `follow`\n to actually follow the imports.\n\n *level* specifies whether to use absolute or relative imports. 0 (the\n default) means only perform absolute imports. Positive values for level\n indicate the number of parent directories to search relative to the\n directory of the module calling ``__import__()`` (see PEP 328 for the\n details).\n\n :param import_path: List of namespaces (strings or Names).\n """\n debug.speed('import %s %s' % (import_path, module_context))\n self._inference_state = inference_state\n self.level = level\n self._module_context = module_context\n\n self._fixed_sys_path = None\n self._infer_possible = True\n if level:\n base = module_context.get_value().py__package__()\n # We need to care for two cases, the first one is if it's a valid\n # Python import. 
This import has a properly defined module name\n # chain like `foo.bar.baz` and an import in baz is made for\n # `..lala.` It can then resolve to `foo.bar.lala`.\n # The else here is a heuristic for all other cases, if for example\n # in `foo` you search for `...bar`, it's obviously out of scope.\n # However since Jedi tries to just do it's best, we help the user\n # here, because he might have specified something wrong in his\n # project.\n if level <= len(base):\n # Here we basically rewrite the level to 0.\n base = tuple(base)\n if level > 1:\n base = base[:-level + 1]\n import_path = base + tuple(import_path)\n else:\n path = module_context.py__file__()\n project_path = self._inference_state.project.path\n import_path = list(import_path)\n if path is None:\n # If no path is defined, our best guess is that the current\n # file is edited by a user on the current working\n # directory. We need to add an initial path, because it\n # will get removed as the name of the current file.\n directory = project_path\n else:\n directory = os.path.dirname(path)\n\n base_import_path, base_directory = _level_to_base_import_path(\n project_path, directory, level,\n )\n if base_directory is None:\n # Everything is lost, the relative import does point\n # somewhere out of the filesystem.\n self._infer_possible = False\n else:\n self._fixed_sys_path = [base_directory]\n\n if base_import_path is None:\n if import_path:\n _add_error(\n module_context, import_path[0],\n message='Attempted relative import beyond top-level package.'\n )\n else:\n import_path = base_import_path + import_path\n self.import_path = import_path\n\n @property\n def _str_import_path(self):\n """Returns the import path as pure strings instead of `Name`."""\n return tuple(\n name.value if isinstance(name, tree.Name) else name\n for name in self.import_path\n )\n\n def _sys_path_with_modifications(self, is_completion):\n if self._fixed_sys_path is not None:\n return self._fixed_sys_path\n\n return (\n # For 
import completions we don't want to see init paths, but for\n # inference we want to show the user as much as possible.\n # See GH #1446.\n self._inference_state.get_sys_path(add_init_paths=not is_completion)\n + [\n str(p) for p\n in sys_path.check_sys_path_modifications(self._module_context)\n ]\n )\n\n def follow(self):\n if not self.import_path:\n if self._fixed_sys_path:\n # This is a bit of a special case, that maybe should be\n # revisited. If the project path is wrong or the user uses\n # relative imports the wrong way, we might end up here, where\n # the `fixed_sys_path == project.path` in that case we kind of\n # use the project.path.parent directory as our path. This is\n # usually not a problem, except if imports in other places are\n # using the same names. Example:\n #\n # foo/ < #1\n # - setup.py\n # - foo/ < #2\n # - __init__.py\n # - foo.py < #3\n #\n # If the top foo is our project folder and somebody uses\n # `from . import foo` in `setup.py`, it will resolve to foo #2,\n # which means that the import for foo.foo is cached as\n # `__init__.py` (#2) and not as `foo.py` (#3). 
This is usually\n # not an issue, because this case is probably pretty rare, but\n # might be an issue for some people.\n #\n # However for most normal cases where we work with different\n # file names, this code path hits where we basically change the\n # project path to an ancestor of project path.\n from jedi.inference.value.namespace import ImplicitNamespaceValue\n import_path = (os.path.basename(self._fixed_sys_path[0]),)\n ns = ImplicitNamespaceValue(\n self._inference_state,\n string_names=import_path,\n paths=self._fixed_sys_path,\n )\n return ValueSet({ns})\n return NO_VALUES\n if not self._infer_possible:\n return NO_VALUES\n\n # Check caches first\n from_cache = self._inference_state.stub_module_cache.get(self._str_import_path)\n if from_cache is not None:\n return ValueSet({from_cache})\n from_cache = self._inference_state.module_cache.get(self._str_import_path)\n if from_cache is not None:\n return from_cache\n\n sys_path = self._sys_path_with_modifications(is_completion=False)\n\n return import_module_by_names(\n self._inference_state, self.import_path, sys_path, self._module_context\n )\n\n def _get_module_names(self, search_path=None, in_module=None):\n """\n Get the names of all modules in the search_path. 
This means file names\n and not names defined in the files.\n """\n if search_path is None:\n sys_path = self._sys_path_with_modifications(is_completion=True)\n else:\n sys_path = search_path\n return list(iter_module_names(\n self._inference_state, self._module_context, sys_path,\n module_cls=ImportName if in_module is None else SubModuleName,\n add_builtin_modules=search_path is None and in_module is None,\n ))\n\n def completion_names(self, inference_state, only_modules=False):\n """\n :param only_modules: Indicates wheter it's possible to import a\n definition that is not defined in a module.\n """\n if not self._infer_possible:\n return []\n\n names = []\n if self.import_path:\n # flask\n if self._str_import_path == ('flask', 'ext'):\n # List Flask extensions like ``flask_foo``\n for mod in self._get_module_names():\n modname = mod.string_name\n if modname.startswith('flask_'):\n extname = modname[len('flask_'):]\n names.append(ImportName(self._module_context, extname))\n # Now the old style: ``flaskext.foo``\n for dir in self._sys_path_with_modifications(is_completion=True):\n flaskext = os.path.join(dir, 'flaskext')\n if os.path.isdir(flaskext):\n names += self._get_module_names([flaskext])\n\n values = self.follow()\n for value in values:\n # Non-modules are not completable.\n if value.api_type not in ('module', 'namespace'): # not a module\n continue\n if not value.is_compiled():\n # sub_modules_dict is not implemented for compiled modules.\n names += value.sub_modules_dict().values()\n\n if not only_modules:\n from jedi.inference.gradual.conversion import convert_values\n\n both_values = values | convert_values(values)\n for c in both_values:\n for filter in c.get_filters():\n names += filter.values()\n else:\n if self.level:\n # We only get here if the level cannot be properly calculated.\n names += self._get_module_names(self._fixed_sys_path)\n else:\n # This is just the list of global imports.\n names += self._get_module_names()\n return names\n\n\ndef 
import_module_by_names(inference_state, import_names, sys_path=None,\n module_context=None, prefer_stubs=True):\n if sys_path is None:\n sys_path = inference_state.get_sys_path()\n\n str_import_names = tuple(\n i.value if isinstance(i, tree.Name) else i\n for i in import_names\n )\n value_set = [None]\n for i, name in enumerate(import_names):\n value_set = ValueSet.from_sets([\n import_module(\n inference_state,\n str_import_names[:i+1],\n parent_module_value,\n sys_path,\n prefer_stubs=prefer_stubs,\n ) for parent_module_value in value_set\n ])\n if not value_set:\n message = 'No module named ' + '.'.join(str_import_names)\n if module_context is not None:\n _add_error(module_context, name, message)\n else:\n debug.warning(message)\n return NO_VALUES\n return value_set\n\n\n@plugin_manager.decorate()\n@import_module_decorator\ndef import_module(inference_state, import_names, parent_module_value, sys_path):\n """\n This method is very similar to importlib's `_gcd_import`.\n """\n if import_names[0] in settings.auto_import_modules:\n module = _load_builtin_module(inference_state, import_names, sys_path)\n if module is None:\n return NO_VALUES\n return ValueSet([module])\n\n module_name = '.'.join(import_names)\n if parent_module_value is None:\n # Override the sys.path. 
It works only good that way.\n # Injecting the path directly into `find_module` did not work.\n file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(\n string=import_names[-1],\n full_name=module_name,\n sys_path=sys_path,\n is_global_search=True,\n )\n if is_pkg is None:\n return NO_VALUES\n else:\n paths = parent_module_value.py__path__()\n if paths is None:\n # The module might not be a package.\n return NO_VALUES\n\n file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(\n string=import_names[-1],\n path=paths,\n full_name=module_name,\n is_global_search=False,\n )\n if is_pkg is None:\n return NO_VALUES\n\n if isinstance(file_io_or_ns, ImplicitNSInfo):\n from jedi.inference.value.namespace import ImplicitNamespaceValue\n module = ImplicitNamespaceValue(\n inference_state,\n string_names=tuple(file_io_or_ns.name.split('.')),\n paths=file_io_or_ns.paths,\n )\n elif file_io_or_ns is None:\n module = _load_builtin_module(inference_state, import_names, sys_path)\n if module is None:\n return NO_VALUES\n else:\n module = _load_python_module(\n inference_state, file_io_or_ns,\n import_names=import_names,\n is_package=is_pkg,\n )\n\n if parent_module_value is None:\n debug.dbg('global search_module %s: %s', import_names[-1], module)\n else:\n debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)\n return ValueSet([module])\n\n\ndef _load_python_module(inference_state, file_io,\n import_names=None, is_package=False):\n module_node = inference_state.parse(\n file_io=file_io,\n cache=True,\n diff_cache=settings.fast_parser,\n cache_path=settings.cache_directory,\n )\n\n from jedi.inference.value import ModuleValue\n return ModuleValue(\n inference_state, module_node,\n file_io=file_io,\n string_names=import_names,\n code_lines=get_cached_code_lines(inference_state.grammar, file_io.path),\n is_package=is_package,\n )\n\n\ndef _load_builtin_module(inference_state, import_names=None, sys_path=None):\n project 
= inference_state.project\n if sys_path is None:\n sys_path = inference_state.get_sys_path()\n if not project._load_unsafe_extensions:\n safe_paths = project._get_base_sys_path(inference_state)\n sys_path = [p for p in sys_path if p in safe_paths]\n\n dotted_name = '.'.join(import_names)\n assert dotted_name is not None\n module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path)\n if module is None:\n # The file might raise an ImportError e.g. and therefore not be\n # importable.\n return None\n return module\n\n\ndef load_module_from_path(inference_state, file_io, import_names=None, is_package=None):\n """\n This should pretty much only be used for get_modules_containing_name. It's\n here to ensure that a random path is still properly loaded into the Jedi\n module structure.\n """\n path = Path(file_io.path)\n if import_names is None:\n e_sys_path = inference_state.get_sys_path()\n import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)\n else:\n assert isinstance(is_package, bool)\n\n is_stub = path.suffix == '.pyi'\n if is_stub:\n folder_io = file_io.get_parent_folder()\n if folder_io.path.endswith('-stubs'):\n folder_io = FolderIO(folder_io.path[:-6])\n if path.name == '__init__.pyi':\n python_file_io = folder_io.get_file_io('__init__.py')\n else:\n python_file_io = folder_io.get_file_io(import_names[-1] + '.py')\n\n try:\n v = load_module_from_path(\n inference_state, python_file_io,\n import_names, is_package=is_package\n )\n values = ValueSet([v])\n except FileNotFoundError:\n values = NO_VALUES\n\n return create_stub_module(\n inference_state, inference_state.latest_grammar, values,\n parse_stub_module(inference_state, file_io), file_io, import_names\n )\n else:\n module = _load_python_module(\n inference_state, file_io,\n import_names=import_names,\n is_package=is_package,\n )\n inference_state.module_cache.add(import_names, ValueSet([module]))\n return module\n\n\ndef 
load_namespace_from_path(inference_state, folder_io):\n import_names, is_package = sys_path.transform_path_to_dotted(\n inference_state.get_sys_path(),\n Path(folder_io.path)\n )\n from jedi.inference.value.namespace import ImplicitNamespaceValue\n return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])\n\n\ndef follow_error_node_imports_if_possible(context, name):\n error_node = tree.search_ancestor(name, 'error_node')\n if error_node is not None:\n # Get the first command start of a started simple_stmt. The error\n # node is sometimes a small_stmt and sometimes a simple_stmt. Check\n # for ; leaves that start a new statements.\n start_index = 0\n for index, n in enumerate(error_node.children):\n if n.start_pos > name.start_pos:\n break\n if n == ';':\n start_index = index + 1\n nodes = error_node.children[start_index:]\n first_name = nodes[0].get_first_leaf().value\n\n # Make it possible to infer stuff like `import foo.` or\n # `from foo.bar`.\n if first_name in ('from', 'import'):\n is_import_from = first_name == 'from'\n level, names = helpers.parse_dotted_names(\n nodes,\n is_import_from=is_import_from,\n until_node=name,\n )\n return Importer(\n context.inference_state, names, context.get_root_context(), level).follow()\n return None\n\n\ndef iter_module_names(inference_state, module_context, search_path,\n module_cls=ImportName, add_builtin_modules=True):\n """\n Get the names of all modules in the search_path. This means file names\n and not names defined in the files.\n """\n # add builtin module names\n if add_builtin_modules:\n for name in inference_state.compiled_subprocess.get_builtin_module_names():\n yield module_cls(module_context, name)\n\n for name in inference_state.compiled_subprocess.iter_module_names(search_path):\n yield module_cls(module_context, name)\n
|
.venv\Lib\site-packages\jedi\inference\imports.py
|
imports.py
|
Python
| 23,082 | 0.95 | 0.221284 | 0.136538 |
awesome-app
| 709 |
2024-07-10T22:45:53.426919
|
BSD-3-Clause
| false |
8e469a6dd9dc51001bbe496d7ce08452
|
from jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.common import monkeypatch\n\n\nclass AbstractLazyValue:\n def __init__(self, data, min=1, max=1):\n self.data = data\n self.min = min\n self.max = max\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.data)\n\n def infer(self):\n raise NotImplementedError\n\n\nclass LazyKnownValue(AbstractLazyValue):\n """data is a Value."""\n def infer(self):\n return ValueSet([self.data])\n\n\nclass LazyKnownValues(AbstractLazyValue):\n """data is a ValueSet."""\n def infer(self):\n return self.data\n\n\nclass LazyUnknownValue(AbstractLazyValue):\n def __init__(self, min=1, max=1):\n super().__init__(None, min, max)\n\n def infer(self):\n return NO_VALUES\n\n\nclass LazyTreeValue(AbstractLazyValue):\n def __init__(self, context, node, min=1, max=1):\n super().__init__(node, min, max)\n self.context = context\n # We need to save the predefined names. It's an unfortunate side effect\n # that needs to be tracked otherwise results will be wrong.\n self._predefined_names = dict(context.predefined_names)\n\n def infer(self):\n with monkeypatch(self.context, 'predefined_names', self._predefined_names):\n return self.context.infer_node(self.data)\n\n\ndef get_merged_lazy_value(lazy_values):\n if len(lazy_values) > 1:\n return MergedLazyValues(lazy_values)\n else:\n return lazy_values[0]\n\n\nclass MergedLazyValues(AbstractLazyValue):\n """data is a list of lazy values."""\n def infer(self):\n return ValueSet.from_sets(l.infer() for l in self.data)\n
|
.venv\Lib\site-packages\jedi\inference\lazy_value.py
|
lazy_value.py
|
Python
| 1,667 | 0.95 | 0.311475 | 0.046512 |
node-utils
| 338 |
2023-09-10T11:27:57.718112
|
GPL-3.0
| false |
2b380ad72f9d10f854b989e3aa8a06ad
|
from abc import abstractmethod\nfrom inspect import Parameter\nfrom typing import Optional, Tuple\n\nfrom parso.tree import search_ancestor\n\nfrom jedi.parser_utils import find_statement_documentation, clean_scope_docstring\nfrom jedi.inference.utils import unite\nfrom jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference import docstrings\nfrom jedi.cache import memoize_method\nfrom jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf\nfrom jedi.plugins import plugin_manager\n\n\ndef _merge_name_docs(names):\n doc = ''\n for name in names:\n if doc:\n # In case we have multiple values, just return all of them\n # separated by a few dashes.\n doc += '\n' + '-' * 30 + '\n'\n doc += name.py__doc__()\n return doc\n\n\nclass AbstractNameDefinition:\n start_pos: Optional[Tuple[int, int]] = None\n string_name: str\n parent_context = None\n tree_name = None\n is_value_name = True\n """\n Used for the Jedi API to know if it's a keyword or an actual name.\n """\n\n @abstractmethod\n def infer(self):\n raise NotImplementedError\n\n @abstractmethod\n def goto(self):\n # Typically names are already definitions and therefore a goto on that\n # name will always result on itself.\n return {self}\n\n def get_qualified_names(self, include_module_names=False):\n qualified_names = self._get_qualified_names()\n if qualified_names is None or not include_module_names:\n return qualified_names\n\n module_names = self.get_root_context().string_names\n if module_names is None:\n return None\n return module_names + qualified_names\n\n def _get_qualified_names(self):\n # By default, a name has no qualified names.\n return None\n\n def get_root_context(self):\n return self.parent_context.get_root_context()\n\n def get_public_name(self):\n return self.string_name\n\n def __repr__(self):\n if self.start_pos is None:\n return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name)\n return 
'<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__,\n self.string_name, self.start_pos)\n\n def is_import(self):\n return False\n\n def py__doc__(self):\n return ''\n\n @property\n def api_type(self):\n return self.parent_context.api_type\n\n def get_defining_qualified_value(self):\n """\n Returns either None or the value that is public and qualified. Won't\n return a function, because a name in a function is never public.\n """\n return None\n\n\nclass AbstractArbitraryName(AbstractNameDefinition):\n """\n When you e.g. want to complete dicts keys, you probably want to complete\n string literals, which is not really a name, but for Jedi we use this\n concept of Name for completions as well.\n """\n is_value_name = False\n\n def __init__(self, inference_state, string):\n self.inference_state = inference_state\n self.string_name = string\n self.parent_context = inference_state.builtins_module\n\n def infer(self):\n return NO_VALUES\n\n\nclass AbstractTreeName(AbstractNameDefinition):\n def __init__(self, parent_context, tree_name):\n self.parent_context = parent_context\n self.tree_name = tree_name\n\n def get_qualified_names(self, include_module_names=False):\n import_node = search_ancestor(self.tree_name, 'import_name', 'import_from')\n # For import nodes we cannot just have names, because it's very unclear\n # how they would look like. 
For now we just ignore them in most cases.\n # In case of level == 1, it works always, because it's like a submodule\n # lookup.\n if import_node is not None and not (import_node.level == 1\n and self.get_root_context().get_value().is_package()):\n # TODO improve the situation for when level is present.\n if include_module_names and not import_node.level:\n return tuple(n.value for n in import_node.get_path_for_name(self.tree_name))\n else:\n return None\n\n return super().get_qualified_names(include_module_names)\n\n def _get_qualified_names(self):\n parent_names = self.parent_context.get_qualified_names()\n if parent_names is None:\n return None\n return parent_names + (self.tree_name.value,)\n\n def get_defining_qualified_value(self):\n if self.is_import():\n raise NotImplementedError("Shouldn't really happen, please report")\n elif self.parent_context:\n return self.parent_context.get_value() # Might be None\n return None\n\n def goto(self):\n context = self.parent_context\n name = self.tree_name\n definition = name.get_definition(import_name_always=True)\n if definition is not None:\n type_ = definition.type\n if type_ == 'expr_stmt':\n # Only take the parent, because if it's more complicated than just\n # a name it's something you can "goto" again.\n is_simple_name = name.parent.type not in ('power', 'trailer')\n if is_simple_name:\n return [self]\n elif type_ in ('import_from', 'import_name'):\n from jedi.inference.imports import goto_import\n module_names = goto_import(context, name)\n return module_names\n else:\n return [self]\n else:\n from jedi.inference.imports import follow_error_node_imports_if_possible\n values = follow_error_node_imports_if_possible(context, name)\n if values is not None:\n return [value.name for value in values]\n\n par = name.parent\n node_type = par.type\n if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:\n # Named param goto.\n trailer = par.parent\n if trailer.type == 'arglist':\n trailer = 
trailer.parent\n if trailer.type != 'classdef':\n if trailer.type == 'decorator':\n value_set = context.infer_node(trailer.children[1])\n else:\n i = trailer.parent.children.index(trailer)\n to_infer = trailer.parent.children[:i]\n if to_infer[0] == 'await':\n to_infer.pop(0)\n value_set = context.infer_node(to_infer[0])\n from jedi.inference.syntax_tree import infer_trailer\n for trailer in to_infer[1:]:\n value_set = infer_trailer(context, value_set, trailer)\n param_names = []\n for value in value_set:\n for signature in value.get_signatures():\n for param_name in signature.get_param_names():\n if param_name.string_name == name.value:\n param_names.append(param_name)\n return param_names\n elif node_type == 'dotted_name': # Is a decorator.\n index = par.children.index(name)\n if index > 0:\n new_dotted = deep_ast_copy(par)\n new_dotted.children[index - 1:] = []\n values = context.infer_node(new_dotted)\n return unite(\n value.goto(name, name_context=context)\n for value in values\n )\n\n if node_type == 'trailer' and par.children[0] == '.':\n values = infer_call_of_leaf(context, name, cut_own_trailer=True)\n return values.goto(name, name_context=context)\n else:\n stmt = search_ancestor(\n name, 'expr_stmt', 'lambdef'\n ) or name\n if stmt.type == 'lambdef':\n stmt = name\n return context.goto(name, position=stmt.start_pos)\n\n def is_import(self):\n imp = search_ancestor(self.tree_name, 'import_from', 'import_name')\n return imp is not None\n\n @property\n def string_name(self):\n return self.tree_name.value\n\n @property\n def start_pos(self):\n return self.tree_name.start_pos\n\n\nclass ValueNameMixin:\n def infer(self):\n return ValueSet([self._value])\n\n def py__doc__(self):\n doc = self._value.py__doc__()\n if not doc and self._value.is_stub():\n from jedi.inference.gradual.conversion import convert_names\n names = convert_names([self], prefer_stub_to_compiled=False)\n if self not in names:\n return _merge_name_docs(names)\n return doc\n\n def 
_get_qualified_names(self):\n return self._value.get_qualified_names()\n\n def get_root_context(self):\n if self.parent_context is None: # A module\n return self._value.as_context()\n return super().get_root_context()\n\n def get_defining_qualified_value(self):\n context = self.parent_context\n if context is not None and (context.is_module() or context.is_class()):\n return self.parent_context.get_value() # Might be None\n return None\n\n @property\n def api_type(self):\n return self._value.api_type\n\n\nclass ValueName(ValueNameMixin, AbstractTreeName):\n def __init__(self, value, tree_name):\n super().__init__(value.parent_context, tree_name)\n self._value = value\n\n def goto(self):\n return ValueSet([self._value.name])\n\n\nclass TreeNameDefinition(AbstractTreeName):\n _API_TYPES = dict(\n import_name='module',\n import_from='module',\n funcdef='function',\n param='param',\n classdef='class',\n )\n\n def infer(self):\n # Refactor this, should probably be here.\n from jedi.inference.syntax_tree import tree_name_to_values\n return tree_name_to_values(\n self.parent_context.inference_state,\n self.parent_context,\n self.tree_name\n )\n\n @property\n def api_type(self):\n definition = self.tree_name.get_definition(import_name_always=True)\n if definition is None:\n return 'statement'\n return self._API_TYPES.get(definition.type, 'statement')\n\n def assignment_indexes(self):\n """\n Returns an array of tuple(int, node) of the indexes that are used in\n tuple assignments.\n\n For example if the name is ``y`` in the following code::\n\n x, (y, z) = 2, ''\n\n would result in ``[(1, xyz_node), (0, yz_node)]``.\n\n When searching for b in the case ``a, *b, c = [...]`` it will return::\n\n [(slice(1, -1), abc_node)]\n """\n indexes = []\n is_star_expr = False\n node = self.tree_name.parent\n compare = self.tree_name\n while node is not None:\n if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):\n for i, child in enumerate(node.children):\n if 
child == compare:\n index = int(i / 2)\n if is_star_expr:\n from_end = int((len(node.children) - i) / 2)\n index = slice(index, -from_end)\n indexes.insert(0, (index, node))\n break\n else:\n raise LookupError("Couldn't find the assignment.")\n is_star_expr = False\n elif node.type == 'star_expr':\n is_star_expr = True\n elif node.type in ('expr_stmt', 'sync_comp_for'):\n break\n\n compare = node\n node = node.parent\n return indexes\n\n @property\n def inference_state(self):\n # Used by the cache function below\n return self.parent_context.inference_state\n\n @inference_state_method_cache(default='')\n def py__doc__(self):\n api_type = self.api_type\n if api_type in ('function', 'class', 'property'):\n if self.parent_context.get_root_context().is_stub():\n from jedi.inference.gradual.conversion import convert_names\n names = convert_names([self], prefer_stub_to_compiled=False)\n if self not in names:\n return _merge_name_docs(names)\n\n # Make sure the names are not TreeNameDefinitions anymore.\n return clean_scope_docstring(self.tree_name.get_definition())\n\n if api_type == 'module':\n names = self.goto()\n if self not in names:\n return _merge_name_docs(names)\n\n if api_type == 'statement' and self.tree_name.is_definition():\n return find_statement_documentation(self.tree_name.get_definition())\n return ''\n\n\nclass _ParamMixin:\n def maybe_positional_argument(self, include_star=True):\n options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD]\n if include_star:\n options.append(Parameter.VAR_POSITIONAL)\n return self.get_kind() in options\n\n def maybe_keyword_argument(self, include_stars=True):\n options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD]\n if include_stars:\n options.append(Parameter.VAR_KEYWORD)\n return self.get_kind() in options\n\n def _kind_string(self):\n kind = self.get_kind()\n if kind == Parameter.VAR_POSITIONAL: # *args\n return '*'\n if kind == Parameter.VAR_KEYWORD: # **kwargs\n return '**'\n return ''\n\n 
def get_qualified_names(self, include_module_names=False):\n return None\n\n\nclass ParamNameInterface(_ParamMixin):\n api_type = 'param'\n\n def get_kind(self):\n raise NotImplementedError\n\n def to_string(self):\n raise NotImplementedError\n\n def get_executed_param_name(self):\n """\n For dealing with type inference and working around the graph, we\n sometimes want to have the param name of the execution. This feels a\n bit strange and we might have to refactor at some point.\n\n For now however it exists to avoid infering params when we don't really\n need them (e.g. when we can just instead use annotations.\n """\n return None\n\n @property\n def star_count(self):\n kind = self.get_kind()\n if kind == Parameter.VAR_POSITIONAL:\n return 1\n if kind == Parameter.VAR_KEYWORD:\n return 2\n return 0\n\n def infer_default(self):\n return NO_VALUES\n\n\nclass BaseTreeParamName(ParamNameInterface, AbstractTreeName):\n annotation_node = None\n default_node = None\n\n def to_string(self):\n output = self._kind_string() + self.get_public_name()\n annotation = self.annotation_node\n default = self.default_node\n if annotation is not None:\n output += ': ' + annotation.get_code(include_prefix=False)\n if default is not None:\n output += '=' + default.get_code(include_prefix=False)\n return output\n\n def get_public_name(self):\n name = self.string_name\n if name.startswith('__'):\n # Params starting with __ are an equivalent to positional only\n # variables in typeshed.\n name = name[2:]\n return name\n\n def goto(self, **kwargs):\n return [self]\n\n\nclass _ActualTreeParamName(BaseTreeParamName):\n def __init__(self, function_value, tree_name):\n super().__init__(\n function_value.get_default_param_context(), tree_name)\n self.function_value = function_value\n\n def _get_param_node(self):\n return search_ancestor(self.tree_name, 'param')\n\n @property\n def annotation_node(self):\n return self._get_param_node().annotation\n\n def infer_annotation(self, 
execute_annotation=True, ignore_stars=False):\n from jedi.inference.gradual.annotation import infer_param\n values = infer_param(\n self.function_value, self._get_param_node(),\n ignore_stars=ignore_stars)\n if execute_annotation:\n values = values.execute_annotation()\n return values\n\n def infer_default(self):\n node = self.default_node\n if node is None:\n return NO_VALUES\n return self.parent_context.infer_node(node)\n\n @property\n def default_node(self):\n return self._get_param_node().default\n\n def get_kind(self):\n tree_param = self._get_param_node()\n if tree_param.star_count == 1: # *args\n return Parameter.VAR_POSITIONAL\n if tree_param.star_count == 2: # **kwargs\n return Parameter.VAR_KEYWORD\n\n # Params starting with __ are an equivalent to positional only\n # variables in typeshed.\n if tree_param.name.value.startswith('__'):\n return Parameter.POSITIONAL_ONLY\n\n parent = tree_param.parent\n param_appeared = False\n for p in parent.children:\n if param_appeared:\n if p == '/':\n return Parameter.POSITIONAL_ONLY\n else:\n if p == '*':\n return Parameter.KEYWORD_ONLY\n if p.type == 'param':\n if p.star_count:\n return Parameter.KEYWORD_ONLY\n if p == tree_param:\n param_appeared = True\n return Parameter.POSITIONAL_OR_KEYWORD\n\n def infer(self):\n values = self.infer_annotation()\n if values:\n return values\n\n doc_params = docstrings.infer_param(self.function_value, self._get_param_node())\n return doc_params\n\n\nclass AnonymousParamName(_ActualTreeParamName):\n @plugin_manager.decorate(name='goto_anonymous_param')\n def goto(self):\n return super().goto()\n\n @plugin_manager.decorate(name='infer_anonymous_param')\n def infer(self):\n values = super().infer()\n if values:\n return values\n from jedi.inference.dynamic_params import dynamic_param_lookup\n param = self._get_param_node()\n values = dynamic_param_lookup(self.function_value, param.position_index)\n if values:\n return values\n\n if param.star_count == 1:\n from 
jedi.inference.value.iterable import FakeTuple\n value = FakeTuple(self.function_value.inference_state, [])\n elif param.star_count == 2:\n from jedi.inference.value.iterable import FakeDict\n value = FakeDict(self.function_value.inference_state, {})\n elif param.default is None:\n return NO_VALUES\n else:\n return self.function_value.parent_context.infer_node(param.default)\n return ValueSet({value})\n\n\nclass ParamName(_ActualTreeParamName):\n def __init__(self, function_value, tree_name, arguments):\n super().__init__(function_value, tree_name)\n self.arguments = arguments\n\n def infer(self):\n values = super().infer()\n if values:\n return values\n\n return self.get_executed_param_name().infer()\n\n def get_executed_param_name(self):\n from jedi.inference.param import get_executed_param_names\n params_names = get_executed_param_names(self.function_value, self.arguments)\n return params_names[self._get_param_node().position_index]\n\n\nclass ParamNameWrapper(_ParamMixin):\n def __init__(self, param_name):\n self._wrapped_param_name = param_name\n\n def __getattr__(self, name):\n return getattr(self._wrapped_param_name, name)\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self._wrapped_param_name)\n\n\nclass ImportName(AbstractNameDefinition):\n start_pos = (1, 0)\n _level = 0\n\n def __init__(self, parent_context, string_name):\n self._from_module_context = parent_context\n self.string_name = string_name\n\n def get_qualified_names(self, include_module_names=False):\n if include_module_names:\n if self._level:\n assert self._level == 1, "Everything else is not supported for now"\n module_names = self._from_module_context.string_names\n if module_names is None:\n return module_names\n return module_names + (self.string_name,)\n return (self.string_name,)\n return ()\n\n @property\n def parent_context(self):\n m = self._from_module_context\n import_values = self.infer()\n if not import_values:\n return m\n # It's almost always possible 
to find the import or to not find it. The\n # importing returns only one value, pretty much always.\n return next(iter(import_values)).as_context()\n\n @memoize_method\n def infer(self):\n from jedi.inference.imports import Importer\n m = self._from_module_context\n return Importer(m.inference_state, [self.string_name], m, level=self._level).follow()\n\n def goto(self):\n return [m.name for m in self.infer()]\n\n @property\n def api_type(self):\n return 'module'\n\n def py__doc__(self):\n return _merge_name_docs(self.goto())\n\n\nclass SubModuleName(ImportName):\n _level = 1\n\n\nclass NameWrapper:\n def __init__(self, wrapped_name):\n self._wrapped_name = wrapped_name\n\n def __getattr__(self, name):\n return getattr(self._wrapped_name, name)\n\n def __repr__(self):\n return '%s(%s)' % (self.__class__.__name__, self._wrapped_name)\n\n\nclass StubNameMixin:\n def py__doc__(self):\n from jedi.inference.gradual.conversion import convert_names\n # Stubs are not complicated and we can just follow simple statements\n # that have an equals in them, because they typically make something\n # else public. See e.g. 
stubs for `requests`.\n names = [self]\n if self.api_type == 'statement' and '=' in self.tree_name.get_definition().children:\n names = [v.name for v in self.infer()]\n\n names = convert_names(names, prefer_stub_to_compiled=False)\n if self in names:\n return super().py__doc__()\n else:\n # We have signatures ourselves in stubs, so don't use signatures\n # from the implementation.\n return _merge_name_docs(names)\n\n\n# From here on down we make looking up the sys.version_info fast.\nclass StubName(StubNameMixin, TreeNameDefinition):\n def infer(self):\n inferred = super().infer()\n if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys':\n from jedi.inference.gradual.stub_value import VersionInfo\n return ValueSet(VersionInfo(c) for c in inferred)\n return inferred\n\n\nclass ModuleName(ValueNameMixin, AbstractNameDefinition):\n start_pos = 1, 0\n\n def __init__(self, value, name):\n self._value = value\n self._name = name\n\n @property\n def string_name(self):\n return self._name\n\n\nclass StubModuleName(StubNameMixin, ModuleName):\n pass\n
|
.venv\Lib\site-packages\jedi\inference\names.py
|
names.py
|
Python
| 23,214 | 0.95 | 0.288035 | 0.051095 |
react-lib
| 971 |
2023-09-26T11:42:48.267516
|
Apache-2.0
| false |
2156094d697fda6792c4c7146a2f1ffb
|
from collections import defaultdict\nfrom inspect import Parameter\n\nfrom jedi import debug\nfrom jedi.inference.utils import PushBackIterator\nfrom jedi.inference import analysis\nfrom jedi.inference.lazy_value import LazyKnownValue, \\n LazyTreeValue, LazyUnknownValue\nfrom jedi.inference.value import iterable\nfrom jedi.inference.names import ParamName\n\n\ndef _add_argument_issue(error_name, lazy_value, message):\n if isinstance(lazy_value, LazyTreeValue):\n node = lazy_value.data\n if node.parent.type == 'argument':\n node = node.parent\n return analysis.add(lazy_value.context, error_name, node, message)\n\n\nclass ExecutedParamName(ParamName):\n def __init__(self, function_value, arguments, param_node, lazy_value, is_default=False):\n super().__init__(function_value, param_node.name, arguments=arguments)\n self._lazy_value = lazy_value\n self._is_default = is_default\n\n def infer(self):\n return self._lazy_value.infer()\n\n def matches_signature(self):\n if self._is_default:\n return True\n argument_values = self.infer().py__class__()\n if self.get_kind() in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):\n return True\n annotations = self.infer_annotation(execute_annotation=False)\n if not annotations:\n # If we cannot infer annotations - or there aren't any - pretend\n # that the signature matches.\n return True\n matches = any(c1.is_sub_class_of(c2)\n for c1 in argument_values\n for c2 in annotations.gather_annotation_classes())\n debug.dbg("param compare %s: %s <=> %s",\n matches, argument_values, annotations, color='BLUE')\n return matches\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.string_name)\n\n\ndef get_executed_param_names_and_issues(function_value, arguments):\n """\n Return a tuple of:\n - a list of `ExecutedParamName`s corresponding to the arguments of the\n function execution `function_value`, containing the inferred value of\n those arguments (whether explicit or default)\n - a list of the issues 
encountered while building that list\n\n For example, given:\n ```\n def foo(a, b, c=None, d='d'): ...\n\n foo(42, c='c')\n ```\n\n Then for the execution of `foo`, this will return a tuple containing:\n - a list with entries for each parameter a, b, c & d; the entries for a,\n c, & d will have their values (42, 'c' and 'd' respectively) included.\n - a list with a single entry about the lack of a value for `b`\n """\n def too_many_args(argument):\n m = _error_argument_count(funcdef, len(unpacked_va))\n # Just report an error for the first param that is not needed (like\n # cPython).\n if arguments.get_calling_nodes():\n # There might not be a valid calling node so check for that first.\n issues.append(\n _add_argument_issue(\n 'type-error-too-many-arguments',\n argument,\n message=m\n )\n )\n else:\n issues.append(None)\n debug.warning('non-public warning: %s', m)\n\n issues = [] # List[Optional[analysis issue]]\n result_params = []\n param_dict = {}\n funcdef = function_value.tree_node\n # Default params are part of the value where the function was defined.\n # This means that they might have access on class variables that the\n # function itself doesn't have.\n default_param_context = function_value.get_default_param_context()\n\n for param in funcdef.get_params():\n param_dict[param.name.value] = param\n unpacked_va = list(arguments.unpack(funcdef))\n var_arg_iterator = PushBackIterator(iter(unpacked_va))\n\n non_matching_keys = defaultdict(lambda: [])\n keys_used = {}\n keys_only = False\n had_multiple_value_error = False\n for param in funcdef.get_params():\n # The value and key can both be null. There, the defaults apply.\n # args / kwargs will just be empty arrays / dicts, respectively.\n # Wrong value count is just ignored. 
If you try to test cases that are\n # not allowed in Python, Jedi will maybe not show any completions.\n is_default = False\n key, argument = next(var_arg_iterator, (None, None))\n while key is not None:\n keys_only = True\n try:\n key_param = param_dict[key]\n except KeyError:\n non_matching_keys[key] = argument\n else:\n if key in keys_used:\n had_multiple_value_error = True\n m = ("TypeError: %s() got multiple values for keyword argument '%s'."\n % (funcdef.name, key))\n for contextualized_node in arguments.get_calling_nodes():\n issues.append(\n analysis.add(contextualized_node.context,\n 'type-error-multiple-values',\n contextualized_node.node, message=m)\n )\n else:\n keys_used[key] = ExecutedParamName(\n function_value, arguments, key_param, argument)\n key, argument = next(var_arg_iterator, (None, None))\n\n try:\n result_params.append(keys_used[param.name.value])\n continue\n except KeyError:\n pass\n\n if param.star_count == 1:\n # *args param\n lazy_value_list = []\n if argument is not None:\n lazy_value_list.append(argument)\n for key, argument in var_arg_iterator:\n # Iterate until a key argument is found.\n if key:\n var_arg_iterator.push_back((key, argument))\n break\n lazy_value_list.append(argument)\n seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list)\n result_arg = LazyKnownValue(seq)\n elif param.star_count == 2:\n if argument is not None:\n too_many_args(argument)\n # **kwargs param\n dct = iterable.FakeDict(function_value.inference_state, dict(non_matching_keys))\n result_arg = LazyKnownValue(dct)\n non_matching_keys = {}\n else:\n # normal param\n if argument is None:\n # No value: Return an empty container\n if param.default is None:\n result_arg = LazyUnknownValue()\n if not keys_only:\n for contextualized_node in arguments.get_calling_nodes():\n m = _error_argument_count(funcdef, len(unpacked_va))\n issues.append(\n analysis.add(\n contextualized_node.context,\n 'type-error-too-few-arguments',\n 
contextualized_node.node,\n message=m,\n )\n )\n else:\n result_arg = LazyTreeValue(default_param_context, param.default)\n is_default = True\n else:\n result_arg = argument\n\n result_params.append(ExecutedParamName(\n function_value, arguments, param, result_arg, is_default=is_default\n ))\n if not isinstance(result_arg, LazyUnknownValue):\n keys_used[param.name.value] = result_params[-1]\n\n if keys_only:\n # All arguments should be handed over to the next function. It's not\n # about the values inside, it's about the names. Jedi needs to now that\n # there's nothing to find for certain names.\n for k in set(param_dict) - set(keys_used):\n param = param_dict[k]\n\n if not (non_matching_keys or had_multiple_value_error\n or param.star_count or param.default):\n # add a warning only if there's not another one.\n for contextualized_node in arguments.get_calling_nodes():\n m = _error_argument_count(funcdef, len(unpacked_va))\n issues.append(\n analysis.add(contextualized_node.context,\n 'type-error-too-few-arguments',\n contextualized_node.node, message=m)\n )\n\n for key, lazy_value in non_matching_keys.items():\n m = "TypeError: %s() got an unexpected keyword argument '%s'." \\n % (funcdef.name, key)\n issues.append(\n _add_argument_issue(\n 'type-error-keyword-argument',\n lazy_value,\n message=m\n )\n )\n\n remaining_arguments = list(var_arg_iterator)\n if remaining_arguments:\n first_key, lazy_value = remaining_arguments[0]\n too_many_args(lazy_value)\n return result_params, issues\n\n\ndef get_executed_param_names(function_value, arguments):\n """\n Return a list of `ExecutedParamName`s corresponding to the arguments of the\n function execution `function_value`, containing the inferred value of those\n arguments (whether explicit or default). 
Any issues building this list (for\n example required arguments which are missing in the invocation) are ignored.\n\n For example, given:\n ```\n def foo(a, b, c=None, d='d'): ...\n\n foo(42, c='c')\n ```\n\n Then for the execution of `foo`, this will return a list containing entries\n for each parameter a, b, c & d; the entries for a, c, & d will have their\n values (42, 'c' and 'd' respectively) included.\n """\n return get_executed_param_names_and_issues(function_value, arguments)[0]\n\n\ndef _error_argument_count(funcdef, actual_count):\n params = funcdef.get_params()\n default_arguments = sum(1 for p in params if p.default or p.star_count)\n\n if default_arguments == 0:\n before = 'exactly '\n else:\n before = 'from %s to ' % (len(params) - default_arguments)\n return ('TypeError: %s() takes %s%s arguments (%s given).'\n % (funcdef.name, before, len(params), actual_count))\n
|
.venv\Lib\site-packages\jedi\inference\param.py
|
param.py
|
Python
| 10,450 | 0.95 | 0.2607 | 0.09292 |
react-lib
| 951 |
2023-12-07T18:42:13.835406
|
Apache-2.0
| false |
9c38997d35bbbd8ef9ec920eb2cb812d
|
from jedi.inference.cache import inference_state_function_cache\n\n\n@inference_state_function_cache()\ndef get_yield_exprs(inference_state, funcdef):\n return list(funcdef.iter_yield_exprs())\n
|
.venv\Lib\site-packages\jedi\inference\parser_cache.py
|
parser_cache.py
|
Python
| 191 | 0.85 | 0.166667 | 0 |
awesome-app
| 106 |
2025-07-04T06:13:54.924646
|
Apache-2.0
| false |
e49edf1589f18485ee4b6ea8ce9391c3
|
"""\nRecursions are the recipe of |jedi| to conquer Python code. However, someone\nmust stop recursions going mad. Some settings are here to make |jedi| stop at\nthe right time. You can read more about them :ref:`here <settings-recursion>`.\n\nNext to the internal ``jedi.inference.cache`` this module also makes |jedi| not\nthread-safe, because ``execution_recursion_decorator`` uses class variables to\ncount the function calls.\n\n.. _settings-recursion:\n\nSettings\n~~~~~~~~~~\n\nRecursion settings are important if you don't want extremely\nrecursive python code to go absolutely crazy.\n\nThe default values are based on experiments while completing the |jedi| library\nitself (inception!). But I don't think there's any other Python library that\nuses recursion in a similarly extreme way. Completion should also be fast and\ntherefore the quality might not always be maximal.\n\n.. autodata:: recursion_limit\n.. autodata:: total_function_execution_limit\n.. autodata:: per_function_execution_limit\n.. autodata:: per_function_recursion_limit\n"""\n\nfrom contextlib import contextmanager\n\nfrom jedi import debug\nfrom jedi.inference.base_value import NO_VALUES\n\n\nrecursion_limit = 15\n"""\nLike :func:`sys.getrecursionlimit()`, just for |jedi|.\n"""\ntotal_function_execution_limit = 200\n"""\nThis is a hard limit of how many non-builtin functions can be executed.\n"""\nper_function_execution_limit = 6\n"""\nThe maximal amount of times a specific function may be executed.\n"""\nper_function_recursion_limit = 2\n"""\nA function may not be executed more than this number of times recursively.\n"""\n\n\nclass RecursionDetector:\n def __init__(self):\n self.pushed_nodes = []\n\n\n@contextmanager\ndef execution_allowed(inference_state, node):\n """\n A decorator to detect recursions in statements. 
In a recursion a statement\n at the same place, in the same module may not be executed two times.\n """\n pushed_nodes = inference_state.recursion_detector.pushed_nodes\n\n if node in pushed_nodes:\n debug.warning('catched stmt recursion: %s @%s', node,\n getattr(node, 'start_pos', None))\n yield False\n else:\n try:\n pushed_nodes.append(node)\n yield True\n finally:\n pushed_nodes.pop()\n\n\ndef execution_recursion_decorator(default=NO_VALUES):\n def decorator(func):\n def wrapper(self, **kwargs):\n detector = self.inference_state.execution_recursion_detector\n limit_reached = detector.push_execution(self)\n try:\n if limit_reached:\n result = default\n else:\n result = func(self, **kwargs)\n finally:\n detector.pop_execution()\n return result\n return wrapper\n return decorator\n\n\nclass ExecutionRecursionDetector:\n """\n Catches recursions of executions.\n """\n def __init__(self, inference_state):\n self._inference_state = inference_state\n\n self._recursion_level = 0\n self._parent_execution_funcs = []\n self._funcdef_execution_counts = {}\n self._execution_count = 0\n\n def pop_execution(self):\n self._parent_execution_funcs.pop()\n self._recursion_level -= 1\n\n def push_execution(self, execution):\n funcdef = execution.tree_node\n\n # These two will be undone in pop_execution.\n self._recursion_level += 1\n self._parent_execution_funcs.append(funcdef)\n\n module_context = execution.get_root_context()\n\n if module_context.is_builtins_module():\n # We have control over builtins so we know they are not recursing\n # like crazy. 
Therefore we just let them execute always, because\n # they usually just help a lot with getting good results.\n return False\n\n if self._recursion_level > recursion_limit:\n debug.warning('Recursion limit (%s) reached', recursion_limit)\n return True\n\n if self._execution_count >= total_function_execution_limit:\n debug.warning('Function execution limit (%s) reached', total_function_execution_limit)\n return True\n self._execution_count += 1\n\n if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:\n if module_context.py__name__() == 'typing':\n return False\n debug.warning(\n 'Per function execution limit (%s) reached: %s',\n per_function_execution_limit,\n funcdef\n )\n return True\n self._funcdef_execution_counts[funcdef] += 1\n\n if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:\n debug.warning(\n 'Per function recursion limit (%s) reached: %s',\n per_function_recursion_limit,\n funcdef\n )\n return True\n return False\n
|
.venv\Lib\site-packages\jedi\inference\recursion.py
|
recursion.py
|
Python
| 4,932 | 0.95 | 0.189542 | 0.032258 |
python-kit
| 817 |
2024-02-28T11:59:04.547623
|
GPL-3.0
| false |
9806423d59e668bb57b001e6f7698116
|
import os\nimport re\n\nfrom parso import python_bytes_to_unicode\n\nfrom jedi.debug import dbg\nfrom jedi.file_io import KnownContentFileIO, FolderIO\nfrom jedi.inference.names import SubModuleName\nfrom jedi.inference.imports import load_module_from_path\nfrom jedi.inference.filters import ParserTreeFilter\nfrom jedi.inference.gradual.conversion import convert_names\n\n_IGNORE_FOLDERS = ('.tox', '.venv', '.mypy_cache', 'venv', '__pycache__')\n\n_OPENED_FILE_LIMIT = 2000\n"""\nStats from a 2016 Lenovo Notebook running Linux:\nWith os.walk, it takes about 10s to scan 11'000 files (without filesystem\ncaching). Once cached it only takes 5s. So it is expected that reading all\nthose files might take a few seconds, but not a lot more.\n"""\n_PARSED_FILE_LIMIT = 30\n"""\nFor now we keep the amount of parsed files really low, since parsing might take\neasily 100ms for bigger files.\n"""\n\n\ndef _resolve_names(definition_names, avoid_names=()):\n for name in definition_names:\n if name in avoid_names:\n # Avoiding recursions here, because goto on a module name lands\n # on the same module.\n continue\n\n if not isinstance(name, SubModuleName):\n # SubModuleNames are not actually existing names but created\n # names when importing something like `import foo.bar.baz`.\n yield name\n\n if name.api_type == 'module':\n yield from _resolve_names(name.goto(), definition_names)\n\n\ndef _dictionarize(names):\n return dict(\n (n if n.tree_name is None else n.tree_name, n)\n for n in names\n )\n\n\ndef _find_defining_names(module_context, tree_name):\n found_names = _find_names(module_context, tree_name)\n\n for name in list(found_names):\n # Convert from/to stubs, because those might also be usages.\n found_names |= set(convert_names(\n [name],\n only_stubs=not name.get_root_context().is_stub(),\n prefer_stub_to_compiled=False\n ))\n\n found_names |= set(_find_global_variables(found_names, tree_name.value))\n for name in list(found_names):\n if name.api_type == 'param' or 
name.tree_name is None \\n or name.tree_name.parent.type == 'trailer':\n continue\n found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name))\n return set(_resolve_names(found_names))\n\n\ndef _find_names(module_context, tree_name):\n name = module_context.create_name(tree_name)\n found_names = set(name.goto())\n found_names.add(name)\n\n return set(_resolve_names(found_names))\n\n\ndef _add_names_in_same_context(context, string_name):\n if context.tree_node is None:\n return\n\n until_position = None\n while True:\n filter_ = ParserTreeFilter(\n parent_context=context,\n until_position=until_position,\n )\n names = set(filter_.get(string_name))\n if not names:\n break\n yield from names\n ordered = sorted(names, key=lambda x: x.start_pos)\n until_position = ordered[0].start_pos\n\n\ndef _find_global_variables(names, search_name):\n for name in names:\n if name.tree_name is None:\n continue\n module_context = name.get_root_context()\n try:\n method = module_context.get_global_filter\n except AttributeError:\n continue\n else:\n for global_name in method().get(search_name):\n yield global_name\n c = module_context.create_context(global_name.tree_name)\n yield from _add_names_in_same_context(c, global_name.string_name)\n\n\ndef find_references(module_context, tree_name, only_in_module=False):\n inf = module_context.inference_state\n search_name = tree_name.value\n\n # We disable flow analysis, because if we have ifs that are only true in\n # certain cases, we want both sides.\n try:\n inf.flow_analysis_enabled = False\n found_names = _find_defining_names(module_context, tree_name)\n finally:\n inf.flow_analysis_enabled = True\n\n found_names_dct = _dictionarize(found_names)\n\n module_contexts = [module_context]\n if not only_in_module:\n for m in set(d.get_root_context() for d in found_names):\n if m != module_context and m.tree_node is not None \\n and inf.project.path in m.py__file__().parents:\n module_contexts.append(m)\n # For param 
no search for other modules is necessary.\n if only_in_module or any(n.api_type == 'param' for n in found_names):\n potential_modules = module_contexts\n else:\n potential_modules = get_module_contexts_containing_name(\n inf,\n module_contexts,\n search_name,\n )\n\n non_matching_reference_maps = {}\n for module_context in potential_modules:\n for name_leaf in module_context.tree_node.get_used_names().get(search_name, []):\n new = _dictionarize(_find_names(module_context, name_leaf))\n if any(tree_name in found_names_dct for tree_name in new):\n found_names_dct.update(new)\n for tree_name in new:\n for dct in non_matching_reference_maps.get(tree_name, []):\n # A reference that was previously searched for matches\n # with a now found name. Merge.\n found_names_dct.update(dct)\n try:\n del non_matching_reference_maps[tree_name]\n except KeyError:\n pass\n else:\n for name in new:\n non_matching_reference_maps.setdefault(name, []).append(new)\n result = found_names_dct.values()\n if only_in_module:\n return [n for n in result if n.get_root_context() == module_context]\n return result\n\n\ndef _check_fs(inference_state, file_io, regex):\n try:\n code = file_io.read()\n except FileNotFoundError:\n return None\n code = python_bytes_to_unicode(code, errors='replace')\n if not regex.search(code):\n return None\n new_file_io = KnownContentFileIO(file_io.path, code)\n m = load_module_from_path(inference_state, new_file_io)\n if m.is_compiled():\n return None\n return m.as_context()\n\n\ndef gitignored_paths(folder_io, file_io):\n ignored_paths_abs = set()\n ignored_paths_rel = set()\n\n for l in file_io.read().splitlines():\n if not l or l.startswith(b'#') or l.startswith(b'!') or b'*' in l:\n continue\n\n p = l.decode('utf-8', 'ignore').rstrip('/')\n if '/' in p:\n name = p.lstrip('/')\n ignored_paths_abs.add(os.path.join(folder_io.path, name))\n else:\n name = p\n ignored_paths_rel.add((folder_io.path, name))\n\n return ignored_paths_abs, ignored_paths_rel\n\n\ndef 
expand_relative_ignore_paths(folder_io, relative_paths):\n curr_path = folder_io.path\n return {os.path.join(curr_path, p[1]) for p in relative_paths if curr_path.startswith(p[0])}\n\n\ndef recurse_find_python_folders_and_files(folder_io, except_paths=()):\n except_paths = set(except_paths)\n except_paths_relative = set()\n\n for root_folder_io, folder_ios, file_ios in folder_io.walk():\n # Delete folders that we don't want to iterate over.\n for file_io in file_ios:\n path = file_io.path\n if path.suffix in ('.py', '.pyi'):\n if path not in except_paths:\n yield None, file_io\n\n if path.name == '.gitignore':\n ignored_paths_abs, ignored_paths_rel = gitignored_paths(\n root_folder_io, file_io\n )\n except_paths |= ignored_paths_abs\n except_paths_relative |= ignored_paths_rel\n\n except_paths_relative_expanded = expand_relative_ignore_paths(\n root_folder_io, except_paths_relative\n )\n\n folder_ios[:] = [\n folder_io\n for folder_io in folder_ios\n if folder_io.path not in except_paths\n and folder_io.path not in except_paths_relative_expanded\n and folder_io.get_base_name() not in _IGNORE_FOLDERS\n ]\n for folder_io in folder_ios:\n yield folder_io, None\n\n\ndef recurse_find_python_files(folder_io, except_paths=()):\n for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths):\n if file_io is not None:\n yield file_io\n\n\ndef _find_python_files_in_sys_path(inference_state, module_contexts):\n sys_path = inference_state.get_sys_path()\n except_paths = set()\n yielded_paths = [m.py__file__() for m in module_contexts]\n for module_context in module_contexts:\n file_io = module_context.get_value().file_io\n if file_io is None:\n continue\n\n folder_io = file_io.get_parent_folder()\n while True:\n path = folder_io.path\n if not any(path.startswith(p) for p in sys_path) or path in except_paths:\n break\n for file_io in recurse_find_python_files(folder_io, except_paths):\n if file_io.path not in yielded_paths:\n yield file_io\n 
except_paths.add(path)\n folder_io = folder_io.get_parent_folder()\n\n\ndef _find_project_modules(inference_state, module_contexts):\n except_ = [m.py__file__() for m in module_contexts]\n yield from recurse_find_python_files(FolderIO(inference_state.project.path), except_)\n\n\ndef get_module_contexts_containing_name(inference_state, module_contexts, name,\n limit_reduction=1):\n """\n Search a name in the directories of modules.\n\n :param limit_reduction: Divides the limits on opening/parsing files by this\n factor.\n """\n # Skip non python modules\n for module_context in module_contexts:\n if module_context.is_compiled():\n continue\n yield module_context\n\n # Very short names are not searched in other modules for now to avoid lots\n # of file lookups.\n if len(name) <= 2:\n return\n\n # Currently not used, because there's only `scope=project` and `scope=file`\n # At the moment there is no such thing as `scope=sys.path`.\n # file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts)\n file_io_iterator = _find_project_modules(inference_state, module_contexts)\n yield from search_in_file_ios(inference_state, file_io_iterator, name,\n limit_reduction=limit_reduction)\n\n\ndef search_in_file_ios(inference_state, file_io_iterator, name,\n limit_reduction=1, complete=False):\n parse_limit = _PARSED_FILE_LIMIT / limit_reduction\n open_limit = _OPENED_FILE_LIMIT / limit_reduction\n file_io_count = 0\n parsed_file_count = 0\n regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b'))\n for file_io in file_io_iterator:\n file_io_count += 1\n m = _check_fs(inference_state, file_io, regex)\n if m is not None:\n parsed_file_count += 1\n yield m\n if parsed_file_count >= parse_limit:\n dbg('Hit limit of parsed files: %s', parse_limit)\n break\n\n if file_io_count >= open_limit:\n dbg('Hit limit of opened files: %s', open_limit)\n break\n
|
.venv\Lib\site-packages\jedi\inference\references.py
|
references.py
|
Python
| 11,407 | 0.95 | 0.282132 | 0.065134 |
vue-tools
| 701 |
2024-03-17T02:33:22.771594
|
Apache-2.0
| false |
8becdc9becdfe6b82cb0a97f04393331
|
from inspect import Parameter\n\nfrom jedi.cache import memoize_method\nfrom jedi import debug\nfrom jedi import parser_utils\n\n\nclass _SignatureMixin:\n def to_string(self):\n def param_strings():\n is_positional = False\n is_kw_only = False\n for n in self.get_param_names(resolve_stars=True):\n kind = n.get_kind()\n is_positional |= kind == Parameter.POSITIONAL_ONLY\n if is_positional and kind != Parameter.POSITIONAL_ONLY:\n yield '/'\n is_positional = False\n\n if kind == Parameter.VAR_POSITIONAL:\n is_kw_only = True\n elif kind == Parameter.KEYWORD_ONLY and not is_kw_only:\n yield '*'\n is_kw_only = True\n\n yield n.to_string()\n\n if is_positional:\n yield '/'\n\n s = self.name.string_name + '(' + ', '.join(param_strings()) + ')'\n annotation = self.annotation_string\n if annotation:\n s += ' -> ' + annotation\n return s\n\n\nclass AbstractSignature(_SignatureMixin):\n def __init__(self, value, is_bound=False):\n self.value = value\n self.is_bound = is_bound\n\n @property\n def name(self):\n return self.value.name\n\n @property\n def annotation_string(self):\n return ''\n\n def get_param_names(self, resolve_stars=False):\n param_names = self._function_value.get_param_names()\n if self.is_bound:\n return param_names[1:]\n return param_names\n\n def bind(self, value):\n raise NotImplementedError\n\n def matches_signature(self, arguments):\n return True\n\n def __repr__(self):\n if self.value is self._function_value:\n return '<%s: %s>' % (self.__class__.__name__, self.value)\n return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self._function_value)\n\n\nclass TreeSignature(AbstractSignature):\n def __init__(self, value, function_value=None, is_bound=False):\n super().__init__(value, is_bound)\n self._function_value = function_value or value\n\n def bind(self, value):\n return TreeSignature(value, self._function_value, is_bound=True)\n\n @property\n def _annotation(self):\n # Classes don't need annotations, even if __init__ has one. 
They always\n # return themselves.\n if self.value.is_class():\n return None\n return self._function_value.tree_node.annotation\n\n @property\n def annotation_string(self):\n a = self._annotation\n if a is None:\n return ''\n return a.get_code(include_prefix=False)\n\n @memoize_method\n def get_param_names(self, resolve_stars=False):\n params = self._function_value.get_param_names()\n if resolve_stars:\n from jedi.inference.star_args import process_params\n params = process_params(params)\n if self.is_bound:\n return params[1:]\n return params\n\n def matches_signature(self, arguments):\n from jedi.inference.param import get_executed_param_names_and_issues\n executed_param_names, issues = \\n get_executed_param_names_and_issues(self._function_value, arguments)\n if issues:\n return False\n\n matches = all(executed_param_name.matches_signature()\n for executed_param_name in executed_param_names)\n if debug.enable_notice:\n tree_node = self._function_value.tree_node\n signature = parser_utils.get_signature(tree_node)\n if matches:\n debug.dbg("Overloading match: %s@%s (%s)",\n signature, tree_node.start_pos[0], arguments, color='BLUE')\n else:\n debug.dbg("Overloading no match: %s@%s (%s)",\n signature, tree_node.start_pos[0], arguments, color='BLUE')\n return matches\n\n\nclass BuiltinSignature(AbstractSignature):\n def __init__(self, value, return_string, function_value=None, is_bound=False):\n super().__init__(value, is_bound)\n self._return_string = return_string\n self.__function_value = function_value\n\n @property\n def annotation_string(self):\n return self._return_string\n\n @property\n def _function_value(self):\n if self.__function_value is None:\n return self.value\n return self.__function_value\n\n def bind(self, value):\n return BuiltinSignature(\n value, self._return_string,\n function_value=self.value,\n is_bound=True\n )\n\n\nclass SignatureWrapper(_SignatureMixin):\n def __init__(self, wrapped_signature):\n self._wrapped_signature = 
wrapped_signature\n\n def __getattr__(self, name):\n return getattr(self._wrapped_signature, name)\n
|
.venv\Lib\site-packages\jedi\inference\signature.py
|
signature.py
|
Python
| 4,859 | 0.95 | 0.282895 | 0.016529 |
node-utils
| 851 |
2025-01-05T04:35:12.022905
|
GPL-3.0
| false |
f0de3bceda2c4d3173a8ac7e94241244
|
"""\nThis module is responsible for inferring *args and **kwargs for signatures.\n\nThis means for example in this case::\n\n def foo(a, b, c): ...\n\n def bar(*args):\n return foo(1, *args)\n\nThe signature here for bar should be `bar(b, c)` instead of bar(*args).\n"""\nfrom inspect import Parameter\n\nfrom parso import tree\n\nfrom jedi.inference.utils import to_list\nfrom jedi.inference.names import ParamNameWrapper\nfrom jedi.inference.helpers import is_big_annoying_library\n\n\ndef _iter_nodes_for_param(param_name):\n from parso.python.tree import search_ancestor\n from jedi.inference.arguments import TreeArguments\n\n execution_context = param_name.parent_context\n # Walk up the parso tree to get the FunctionNode we want. We use the parso\n # tree rather than going via the execution context so that we're agnostic of\n # the specific scope we're evaluating within (i.e: module or function,\n # etc.).\n function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef')\n module_node = function_node.get_root_node()\n start = function_node.children[-1].start_pos\n end = function_node.children[-1].end_pos\n for name in module_node.get_used_names().get(param_name.string_name):\n if start <= name.start_pos < end:\n # Is used in the function\n argument = name.parent\n if argument.type == 'argument' \\n and argument.children[0] == '*' * param_name.star_count:\n trailer = search_ancestor(argument, 'trailer')\n if trailer is not None: # Make sure we're in a function\n context = execution_context.create_context(trailer)\n if _goes_to_param_name(param_name, context, name):\n values = _to_callables(context, trailer)\n\n args = TreeArguments.create_cached(\n execution_context.inference_state,\n context=context,\n argument_node=trailer.children[1],\n trailer=trailer,\n )\n for c in values:\n yield c, args\n\n\ndef _goes_to_param_name(param_name, context, potential_name):\n if potential_name.type != 'name':\n return False\n from jedi.inference.names import 
TreeNameDefinition\n found = TreeNameDefinition(context, potential_name).goto()\n return any(param_name.parent_context == p.parent_context\n and param_name.start_pos == p.start_pos\n for p in found)\n\n\ndef _to_callables(context, trailer):\n from jedi.inference.syntax_tree import infer_trailer\n\n atom_expr = trailer.parent\n index = atom_expr.children[0] == 'await'\n # Infer atom first\n values = context.infer_node(atom_expr.children[index])\n for trailer2 in atom_expr.children[index + 1:]:\n if trailer == trailer2:\n break\n values = infer_trailer(context, values, trailer2)\n return values\n\n\ndef _remove_given_params(arguments, param_names):\n count = 0\n used_keys = set()\n for key, _ in arguments.unpack():\n if key is None:\n count += 1\n else:\n used_keys.add(key)\n\n for p in param_names:\n if count and p.maybe_positional_argument():\n count -= 1\n continue\n if p.string_name in used_keys and p.maybe_keyword_argument():\n continue\n yield p\n\n\n@to_list\ndef process_params(param_names, star_count=3): # default means both * and **\n if param_names:\n if is_big_annoying_library(param_names[0].parent_context):\n # At first this feature can look innocent, but it does a lot of\n # type inference in some cases, so we just ditch it.\n yield from param_names\n return\n\n used_names = set()\n arg_callables = []\n kwarg_callables = []\n\n kw_only_names = []\n kwarg_names = []\n arg_names = []\n original_arg_name = None\n original_kwarg_name = None\n for p in param_names:\n kind = p.get_kind()\n if kind == Parameter.VAR_POSITIONAL:\n if star_count & 1:\n arg_callables = _iter_nodes_for_param(p)\n original_arg_name = p\n elif p.get_kind() == Parameter.VAR_KEYWORD:\n if star_count & 2:\n kwarg_callables = list(_iter_nodes_for_param(p))\n original_kwarg_name = p\n elif kind == Parameter.KEYWORD_ONLY:\n if star_count & 2:\n kw_only_names.append(p)\n elif kind == Parameter.POSITIONAL_ONLY:\n if star_count & 1:\n yield p\n else:\n if star_count == 1:\n yield 
ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)\n elif star_count == 2:\n kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY))\n else:\n used_names.add(p.string_name)\n yield p\n\n # First process *args\n longest_param_names = ()\n found_arg_signature = False\n found_kwarg_signature = False\n for func_and_argument in arg_callables:\n func, arguments = func_and_argument\n new_star_count = star_count\n if func_and_argument in kwarg_callables:\n kwarg_callables.remove(func_and_argument)\n else:\n new_star_count = 1\n\n for signature in func.get_signatures():\n found_arg_signature = True\n if new_star_count == 3:\n found_kwarg_signature = True\n args_for_this_func = []\n for p in process_params(\n list(_remove_given_params(\n arguments,\n signature.get_param_names(resolve_stars=False)\n )), new_star_count):\n if p.get_kind() == Parameter.VAR_KEYWORD:\n kwarg_names.append(p)\n elif p.get_kind() == Parameter.VAR_POSITIONAL:\n arg_names.append(p)\n elif p.get_kind() == Parameter.KEYWORD_ONLY:\n kw_only_names.append(p)\n else:\n args_for_this_func.append(p)\n if len(args_for_this_func) > len(longest_param_names):\n longest_param_names = args_for_this_func\n\n for p in longest_param_names:\n if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL:\n yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)\n else:\n if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD:\n used_names.add(p.string_name)\n yield p\n\n if not found_arg_signature and original_arg_name is not None:\n yield original_arg_name\n elif arg_names:\n yield arg_names[0]\n\n # Then process **kwargs\n for func, arguments in kwarg_callables:\n for signature in func.get_signatures():\n found_kwarg_signature = True\n for p in process_params(\n list(_remove_given_params(\n arguments,\n signature.get_param_names(resolve_stars=False)\n )), star_count=2):\n if p.get_kind() == Parameter.VAR_KEYWORD:\n kwarg_names.append(p)\n elif p.get_kind() == Parameter.KEYWORD_ONLY:\n 
kw_only_names.append(p)\n\n for p in kw_only_names:\n if p.string_name in used_names:\n continue\n yield p\n used_names.add(p.string_name)\n\n if not found_kwarg_signature and original_kwarg_name is not None:\n yield original_kwarg_name\n elif kwarg_names:\n yield kwarg_names[0]\n\n\nclass ParamNameFixedKind(ParamNameWrapper):\n def __init__(self, param_name, new_kind):\n super().__init__(param_name)\n self._new_kind = new_kind\n\n def get_kind(self):\n return self._new_kind\n
|
.venv\Lib\site-packages\jedi\inference\star_args.py
|
star_args.py
|
Python
| 7,895 | 0.95 | 0.268182 | 0.053191 |
python-kit
| 8 |
2024-08-19T12:10:53.275064
|
BSD-3-Clause
| false |
cd9eb28fb1eeeec54d71afd44436a607
|
import os\nimport re\nfrom pathlib import Path\nfrom importlib.machinery import all_suffixes\n\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.base_value import ContextualizedNode\nfrom jedi.inference.helpers import is_string, get_str_or_none\nfrom jedi.parser_utils import get_cached_code_lines\nfrom jedi.file_io import FileIO\nfrom jedi import settings\nfrom jedi import debug\n\n_BUILDOUT_PATH_INSERTION_LIMIT = 10\n\n\ndef _abs_path(module_context, str_path: str):\n path = Path(str_path)\n if path.is_absolute():\n return path\n\n module_path = module_context.py__file__()\n if module_path is None:\n # In this case we have no idea where we actually are in the file\n # system.\n return None\n\n base_dir = module_path.parent\n return base_dir.joinpath(path).absolute()\n\n\ndef _paths_from_assignment(module_context, expr_stmt):\n """\n Extracts the assigned strings from an assignment that looks as follows::\n\n sys.path[0:0] = ['module/path', 'another/module/path']\n\n This function is in general pretty tolerant (and therefore 'buggy').\n However, it's not a big issue usually to add more paths to Jedi's sys_path,\n because it will only affect Jedi in very random situations and by adding\n more paths than necessary, it usually benefits the general user.\n """\n for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):\n try:\n assert operator in ['=', '+=']\n assert assignee.type in ('power', 'atom_expr') and \\n len(assignee.children) > 1\n c = assignee.children\n assert c[0].type == 'name' and c[0].value == 'sys'\n trailer = c[1]\n assert trailer.children[0] == '.' and trailer.children[1].value == 'path'\n # TODO Essentially we're not checking details on sys.path\n # manipulation. 
Both assigment of the sys.path and changing/adding\n # parts of the sys.path are the same: They get added to the end of\n # the current sys.path.\n """\n execution = c[2]\n assert execution.children[0] == '['\n subscript = execution.children[1]\n assert subscript.type == 'subscript'\n assert ':' in subscript.children\n """\n except AssertionError:\n continue\n\n cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)\n for lazy_value in cn.infer().iterate(cn):\n for value in lazy_value.infer():\n if is_string(value):\n abs_path = _abs_path(module_context, value.get_safe_value())\n if abs_path is not None:\n yield abs_path\n\n\ndef _paths_from_list_modifications(module_context, trailer1, trailer2):\n """ extract the path from either "sys.path.append" or "sys.path.insert" """\n # Guarantee that both are trailers, the first one a name and the second one\n # a function execution with at least one param.\n if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'\n and trailer2.type == 'trailer' and trailer2.children[0] == '('\n and len(trailer2.children) == 3):\n return\n\n name = trailer1.children[1].value\n if name not in ['insert', 'append']:\n return\n arg = trailer2.children[1]\n if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.\n arg = arg.children[2]\n\n for value in module_context.create_context(arg).infer_node(arg):\n p = get_str_or_none(value)\n if p is None:\n continue\n abs_path = _abs_path(module_context, p)\n if abs_path is not None:\n yield abs_path\n\n\n@inference_state_method_cache(default=[])\ndef check_sys_path_modifications(module_context):\n """\n Detect sys.path modifications within module.\n """\n def get_sys_path_powers(names):\n for name in names:\n power = name.parent.parent\n if power is not None and power.type in ('power', 'atom_expr'):\n c = power.children\n if c[0].type == 'name' and c[0].value == 'sys' \\n and c[1].type == 'trailer':\n n = c[1].children[1]\n if n.type == 'name' 
and n.value == 'path':\n yield name, power\n\n if module_context.tree_node is None:\n return []\n\n added = []\n try:\n possible_names = module_context.tree_node.get_used_names()['path']\n except KeyError:\n pass\n else:\n for name, power in get_sys_path_powers(possible_names):\n expr_stmt = power.parent\n if len(power.children) >= 4:\n added.extend(\n _paths_from_list_modifications(\n module_context, *power.children[2:4]\n )\n )\n elif expr_stmt is not None and expr_stmt.type == 'expr_stmt':\n added.extend(_paths_from_assignment(module_context, expr_stmt))\n return added\n\n\ndef discover_buildout_paths(inference_state, script_path):\n buildout_script_paths = set()\n\n for buildout_script_path in _get_buildout_script_paths(script_path):\n for path in _get_paths_from_buildout_script(inference_state, buildout_script_path):\n buildout_script_paths.add(path)\n if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT:\n break\n\n return buildout_script_paths\n\n\ndef _get_paths_from_buildout_script(inference_state, buildout_script_path):\n file_io = FileIO(str(buildout_script_path))\n try:\n module_node = inference_state.parse(\n file_io=file_io,\n cache=True,\n cache_path=settings.cache_directory\n )\n except IOError:\n debug.warning('Error trying to read buildout_script: %s', buildout_script_path)\n return\n\n from jedi.inference.value import ModuleValue\n module_context = ModuleValue(\n inference_state, module_node,\n file_io=file_io,\n string_names=None,\n code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path),\n ).as_context()\n yield from check_sys_path_modifications(module_context)\n\n\ndef _get_parent_dir_with_file(path: Path, filename):\n for parent in path.parents:\n try:\n if parent.joinpath(filename).is_file():\n return parent\n except OSError:\n continue\n return None\n\n\ndef _get_buildout_script_paths(search_path: Path):\n """\n if there is a 'buildout.cfg' file in one of the parent directories of the\n given module it 
will return a list of all files in the buildout bin\n directory that look like python files.\n\n :param search_path: absolute path to the module.\n """\n project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')\n if not project_root:\n return\n bin_path = project_root.joinpath('bin')\n if not bin_path.exists():\n return\n\n for filename in os.listdir(bin_path):\n try:\n filepath = bin_path.joinpath(filename)\n with open(filepath, 'r') as f:\n firstline = f.readline()\n if firstline.startswith('#!') and 'python' in firstline:\n yield filepath\n except (UnicodeDecodeError, IOError) as e:\n # Probably a binary file; permission error or race cond. because\n # file got deleted. Ignore it.\n debug.warning(str(e))\n continue\n\n\ndef remove_python_path_suffix(path):\n for suffix in all_suffixes() + ['.pyi']:\n if path.suffix == suffix:\n path = path.with_name(path.stem)\n break\n return path\n\n\ndef transform_path_to_dotted(sys_path, module_path):\n """\n Returns the dotted path inside a sys.path as a list of names. e.g.\n\n >>> transform_path_to_dotted([str(Path("/foo").absolute())], Path('/foo/bar/baz.py').absolute())\n (('bar', 'baz'), False)\n\n Returns (None, False) if the path doesn't really resolve to anything.\n The second return part is if it is a package.\n """\n # First remove the suffix.\n module_path = remove_python_path_suffix(module_path)\n if module_path.name.startswith('.'):\n return None, False\n\n # Once the suffix was removed we are using the files as we know them. 
This\n # means that if someone uses an ending like .vim for a Python file, .vim\n # will be part of the returned dotted part.\n\n is_package = module_path.name == '__init__'\n if is_package:\n module_path = module_path.parent\n\n def iter_potential_solutions():\n for p in sys_path:\n if str(module_path).startswith(p):\n # Strip the trailing slash/backslash\n rest = str(module_path)[len(p):]\n # On Windows a path can also use a slash.\n if rest.startswith(os.path.sep) or rest.startswith('/'):\n # Remove a slash in cases it's still there.\n rest = rest[1:]\n\n if rest:\n split = rest.split(os.path.sep)\n if not all(split):\n # This means that part of the file path was empty, this\n # is very strange and is probably a file that is called\n # `.py`.\n return\n # Stub folders for foo can end with foo-stubs. Just remove\n # it.\n yield tuple(re.sub(r'-stubs$', '', s) for s in split)\n\n potential_solutions = tuple(iter_potential_solutions())\n if not potential_solutions:\n return None, False\n # Try to find the shortest path, this makes more sense usually, because the\n # user usually has venvs somewhere. This means that a path like\n # .tox/py37/lib/python3.7/os.py can be normal for a file. However in that\n # case we definitely want to return ['os'] as a path and not a crazy\n # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a\n # heuristic and there's now ay to "always" do it right.\n return sorted(potential_solutions, key=lambda p: len(p))[0], is_package\n
|
.venv\Lib\site-packages\jedi\inference\sys_path.py
|
sys_path.py
|
Python
| 10,218 | 0.95 | 0.242647 | 0.122271 |
awesome-app
| 922 |
2025-06-28T23:09:26.279649
|
GPL-3.0
| false |
12054e072478660a21a1322053c57e00
|
""" A universal module with functions / classes without dependencies. """\nimport functools\nimport re\nimport os\n\n\n_sep = os.path.sep\nif os.path.altsep is not None:\n _sep += os.path.altsep\n_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))\ndel _sep\n\n\ndef to_list(func):\n def wrapper(*args, **kwargs):\n return list(func(*args, **kwargs))\n return wrapper\n\n\ndef to_tuple(func):\n def wrapper(*args, **kwargs):\n return tuple(func(*args, **kwargs))\n return wrapper\n\n\ndef unite(iterable):\n """Turns a two dimensional array into a one dimensional."""\n return set(typ for types in iterable for typ in types)\n\n\nclass UncaughtAttributeError(Exception):\n """\n Important, because `__getattr__` and `hasattr` catch AttributeErrors\n implicitly. This is really evil (mainly because of `__getattr__`).\n Therefore this class originally had to be derived from `BaseException`\n instead of `Exception`. But because I removed relevant `hasattr` from\n the code base, we can now switch back to `Exception`.\n\n :param base: return values of sys.exc_info().\n """\n\n\ndef safe_property(func):\n return property(reraise_uncaught(func))\n\n\ndef reraise_uncaught(func):\n """\n Re-throw uncaught `AttributeError`.\n\n Usage: Put ``@rethrow_uncaught`` in front of the function\n which does **not** suppose to raise `AttributeError`.\n\n AttributeError is easily get caught by `hasattr` and another\n ``except AttributeError`` clause. This becomes problem when you use\n a lot of "dynamic" attributes (e.g., using ``@property``) because you\n can't distinguish if the property does not exist for real or some code\n inside of the "dynamic" attribute through that error. In a well\n written code, such error should not exist but getting there is very\n difficult. 
This decorator is to help us getting there by changing\n `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.\n This helps us noticing bugs earlier and facilitates debugging.\n """\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n try:\n return func(*args, **kwds)\n except AttributeError as e:\n raise UncaughtAttributeError(e) from e\n return wrapper\n\n\nclass PushBackIterator:\n def __init__(self, iterator):\n self.pushes = []\n self.iterator = iterator\n self.current = None\n\n def push_back(self, value):\n self.pushes.append(value)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.pushes:\n self.current = self.pushes.pop()\n else:\n self.current = next(self.iterator)\n return self.current\n
|
.venv\Lib\site-packages\jedi\inference\utils.py
|
utils.py
|
Python
| 2,706 | 0.85 | 0.288889 | 0 |
python-kit
| 314 |
2024-02-02T15:38:08.633775
|
GPL-3.0
| false |
a450423a177358060612e2177fd6c393
|
import inspect\nimport types\nimport traceback\nimport sys\nimport operator as op\nfrom collections import namedtuple\nimport warnings\nimport re\nimport builtins\nimport typing\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nfrom jedi.inference.compiled.getattr_static import getattr_static\n\nALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict)\n\nMethodDescriptorType = type(str.replace)\n# These are not considered classes and access is granted even though they have\n# a __class__ attribute.\nNOT_CLASS_TYPES = (\n types.BuiltinFunctionType,\n types.CodeType,\n types.FrameType,\n types.FunctionType,\n types.GeneratorType,\n types.GetSetDescriptorType,\n types.LambdaType,\n types.MemberDescriptorType,\n types.MethodType,\n types.ModuleType,\n types.TracebackType,\n MethodDescriptorType,\n types.MappingProxyType,\n types.SimpleNamespace,\n types.DynamicClassAttribute,\n)\n\n# Those types don't exist in typing.\nMethodDescriptorType = type(str.replace)\nWrapperDescriptorType = type(set.__iter__)\n# `object.__subclasshook__` is an already executed descriptor.\nobject_class_dict = type.__dict__["__dict__"].__get__(object) # type: ignore[index]\nClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])\n\n_sentinel = object()\n\n# Maps Python syntax to the operator module.\nCOMPARISON_OPERATORS = {\n '==': op.eq,\n '!=': op.ne,\n 'is': op.is_,\n 'is not': op.is_not,\n '<': op.lt,\n '<=': op.le,\n '>': op.gt,\n '>=': op.ge,\n}\n\n_OPERATORS = {\n '+': op.add,\n '-': op.sub,\n}\n_OPERATORS.update(COMPARISON_OPERATORS)\n\nALLOWED_DESCRIPTOR_ACCESS = (\n types.FunctionType,\n types.GetSetDescriptorType,\n types.MemberDescriptorType,\n MethodDescriptorType,\n WrapperDescriptorType,\n ClassMethodDescriptorType,\n staticmethod,\n classmethod,\n)\n\n\ndef safe_getattr(obj, name, default=_sentinel):\n try:\n attr, is_get_descriptor = getattr_static(obj, name)\n except AttributeError:\n if default is _sentinel:\n raise\n return 
default\n else:\n if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS):\n # In case of descriptors that have get methods we cannot return\n # it's value, because that would mean code execution.\n # Since it's an isinstance call, code execution is still possible,\n # but this is not really a security feature, but much more of a\n # safety feature. Code execution is basically always possible when\n # a module is imported. This is here so people don't shoot\n # themselves in the foot.\n return getattr(obj, name)\n return attr\n\n\nSignatureParam = namedtuple(\n 'SignatureParam',\n 'name has_default default default_string has_annotation annotation annotation_string kind_name'\n)\n\n\ndef shorten_repr(func):\n def wrapper(self):\n r = func(self)\n if len(r) > 50:\n r = r[:50] + '..'\n return r\n return wrapper\n\n\ndef create_access(inference_state, obj):\n return inference_state.compiled_subprocess.get_or_create_access_handle(obj)\n\n\ndef load_module(inference_state, dotted_name, sys_path):\n temp, sys.path = sys.path, sys_path\n try:\n __import__(dotted_name)\n except ImportError:\n # If a module is "corrupt" or not really a Python module or whatever.\n warnings.warn(\n "Module %s not importable in path %s." 
% (dotted_name, sys_path),\n UserWarning,\n stacklevel=2,\n )\n return None\n except Exception:\n # Since __import__ pretty much makes code execution possible, just\n # catch any error here and print it.\n warnings.warn(\n "Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2\n )\n return None\n finally:\n sys.path = temp\n\n # Just access the cache after import, because of #59 as well as the very\n # complicated import structure of Python.\n module = sys.modules[dotted_name]\n return create_access_path(inference_state, module)\n\n\nclass AccessPath:\n def __init__(self, accesses):\n self.accesses = accesses\n\n\ndef create_access_path(inference_state, obj) -> AccessPath:\n access = create_access(inference_state, obj)\n return AccessPath(access.get_access_path_tuples())\n\n\ndef get_api_type(obj):\n if inspect.isclass(obj):\n return 'class'\n elif inspect.ismodule(obj):\n return 'module'\n elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \\n or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):\n return 'function'\n # Everything else...\n return 'instance'\n\n\nclass DirectObjectAccess:\n def __init__(self, inference_state, obj):\n self._inference_state = inference_state\n self._obj = obj\n\n def __repr__(self):\n return '%s(%s)' % (self.__class__.__name__, self.get_repr())\n\n def _create_access(self, obj):\n return create_access(self._inference_state, obj)\n\n def _create_access_path(self, obj) -> AccessPath:\n return create_access_path(self._inference_state, obj)\n\n def py__bool__(self):\n return bool(self._obj)\n\n def py__file__(self) -> Optional[Path]:\n try:\n return Path(self._obj.__file__)\n except AttributeError:\n return None\n\n def py__doc__(self):\n return inspect.getdoc(self._obj) or ''\n\n def py__name__(self):\n if not _is_class_instance(self._obj) or \\n inspect.ismethoddescriptor(self._obj): # slots\n cls = self._obj\n else:\n try:\n cls = self._obj.__class__\n except AttributeError:\n # happens with 
numpy.core.umath._UFUNC_API (you get it\n # automatically by doing `import numpy`.\n return None\n\n try:\n return cls.__name__\n except AttributeError:\n return None\n\n def py__mro__accesses(self):\n return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])\n\n def py__getitem__all_values(self):\n if isinstance(self._obj, dict):\n return [self._create_access_path(v) for v in self._obj.values()]\n if isinstance(self._obj, (list, tuple)):\n return [self._create_access_path(v) for v in self._obj]\n\n if self.is_instance():\n cls = DirectObjectAccess(self._inference_state, self._obj.__class__)\n return cls.py__getitem__all_values()\n\n try:\n getitem = self._obj.__getitem__\n except AttributeError:\n pass\n else:\n annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()\n if annotation is not None:\n return [annotation]\n return None\n\n def py__simple_getitem__(self, index, *, safe=True):\n if safe and type(self._obj) not in ALLOWED_GETITEM_TYPES:\n # Get rid of side effects, we won't call custom `__getitem__`s.\n return None\n\n return self._create_access_path(self._obj[index])\n\n def py__iter__list(self):\n try:\n iter_method = self._obj.__iter__\n except AttributeError:\n return None\n else:\n p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()\n if p is not None:\n return [p]\n\n if type(self._obj) not in ALLOWED_GETITEM_TYPES:\n # Get rid of side effects, we won't call custom `__getitem__`s.\n return []\n\n lst = []\n for i, part in enumerate(self._obj):\n if i > 20:\n # Should not go crazy with large iterators\n break\n lst.append(self._create_access_path(part))\n return lst\n\n def py__class__(self):\n return self._create_access_path(self._obj.__class__)\n\n def py__bases__(self):\n return [self._create_access_path(base) for base in self._obj.__bases__]\n\n def py__path__(self):\n paths = getattr(self._obj, '__path__', None)\n # Avoid some weird hacks that would just fail, 
because they cannot be\n # used by pickle.\n if not isinstance(paths, list) \\n or not all(isinstance(p, str) for p in paths):\n return None\n return paths\n\n @shorten_repr\n def get_repr(self):\n if inspect.ismodule(self._obj):\n return repr(self._obj)\n # Try to avoid execution of the property.\n if safe_getattr(self._obj, '__module__', default='') == 'builtins':\n return repr(self._obj)\n\n type_ = type(self._obj)\n if type_ == type:\n return type.__repr__(self._obj)\n\n if safe_getattr(type_, '__module__', default='') == 'builtins':\n # Allow direct execution of repr for builtins.\n return repr(self._obj)\n return object.__repr__(self._obj)\n\n def is_class(self):\n return inspect.isclass(self._obj)\n\n def is_function(self):\n return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)\n\n def is_module(self):\n return inspect.ismodule(self._obj)\n\n def is_instance(self):\n return _is_class_instance(self._obj)\n\n def ismethoddescriptor(self):\n return inspect.ismethoddescriptor(self._obj)\n\n def get_qualified_names(self):\n def try_to_get_name(obj):\n return getattr(obj, '__qualname__', getattr(obj, '__name__', None))\n\n if self.is_module():\n return ()\n name = try_to_get_name(self._obj)\n if name is None:\n name = try_to_get_name(type(self._obj))\n if name is None:\n return ()\n return tuple(name.split('.'))\n\n def dir(self):\n return dir(self._obj)\n\n def has_iter(self):\n try:\n iter(self._obj)\n return True\n except TypeError:\n return False\n\n def is_allowed_getattr(self, name, safe=True) -> Tuple[bool, bool, Optional[AccessPath]]:\n # TODO this API is ugly.\n try:\n attr, is_get_descriptor = getattr_static(self._obj, name)\n except AttributeError:\n if not safe:\n # Unsafe is mostly used to check for __getattr__/__getattribute__.\n # getattr_static works for properties, but the underscore methods\n # are just ignored (because it's safer and avoids more code\n # execution). 
See also GH #1378.\n\n # Avoid warnings, see comment in the next function.\n with warnings.catch_warnings(record=True):\n warnings.simplefilter("always")\n try:\n return hasattr(self._obj, name), False, None\n except Exception:\n # Obviously has an attribute (probably a property) that\n # gets executed, so just avoid all exceptions here.\n pass\n return False, False, None\n else:\n if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:\n if isinstance(attr, property):\n if hasattr(attr.fget, '__annotations__'):\n a = DirectObjectAccess(self._inference_state, attr.fget)\n return True, True, a.get_return_annotation()\n # In case of descriptors that have get methods we cannot return\n # it's value, because that would mean code execution.\n return True, True, None\n return True, False, None\n\n def getattr_paths(self, name, default=_sentinel):\n try:\n # Make sure no warnings are printed here, this is autocompletion,\n # warnings should not be shown. See also GH #1383.\n with warnings.catch_warnings(record=True):\n warnings.simplefilter("always")\n return_obj = getattr(self._obj, name)\n except Exception as e:\n if default is _sentinel:\n if isinstance(e, AttributeError):\n # Happens e.g. in properties of\n # PyQt4.QtGui.QStyleOptionComboBox.currentText\n # -> just set it to None\n raise\n # Just in case anything happens, return an AttributeError. It\n # should not crash.\n raise AttributeError\n return_obj = default\n access = self._create_access(return_obj)\n if inspect.ismodule(return_obj):\n return [access]\n\n try:\n module = return_obj.__module__\n except AttributeError:\n pass\n else:\n if module is not None and isinstance(module, str):\n try:\n __import__(module)\n # For some modules like _sqlite3, the __module__ for classes is\n # different, in this case it's sqlite3. So we have to try to\n # load that "original" module, because it's not loaded yet. 
If\n # we don't do that, we don't really have a "parent" module and\n # we would fall back to builtins.\n except ImportError:\n pass\n\n module = inspect.getmodule(return_obj)\n if module is None:\n module = inspect.getmodule(type(return_obj))\n if module is None:\n module = builtins\n return [self._create_access(module), access]\n\n def get_safe_value(self):\n if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None:\n return self._obj\n raise ValueError("Object is type %s and not simple" % type(self._obj))\n\n def get_api_type(self):\n return get_api_type(self._obj)\n\n def get_array_type(self):\n if isinstance(self._obj, dict):\n return 'dict'\n return None\n\n def get_key_paths(self):\n def iter_partial_keys():\n # We could use list(keys()), but that might take a lot more memory.\n for (i, k) in enumerate(self._obj.keys()):\n # Limit key listing at some point. This is artificial, but this\n # way we don't get stalled because of slow completions\n if i > 50:\n break\n yield k\n\n return [self._create_access_path(k) for k in iter_partial_keys()]\n\n def get_access_path_tuples(self):\n accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]\n return [(access.py__name__(), access) for access in accesses]\n\n def _get_objects_path(self):\n def get():\n obj = self._obj\n yield obj\n try:\n obj = obj.__objclass__\n except AttributeError:\n pass\n else:\n yield obj\n\n try:\n # Returns a dotted string path.\n imp_plz = obj.__module__\n except AttributeError:\n # Unfortunately in some cases like `int` there's no __module__\n if not inspect.ismodule(obj):\n yield builtins\n else:\n if imp_plz is None:\n # Happens for example in `(_ for _ in []).send.__module__`.\n yield builtins\n else:\n try:\n yield sys.modules[imp_plz]\n except KeyError:\n # __module__ can be something arbitrary that doesn't exist.\n yield builtins\n\n return list(reversed(list(get())))\n\n def execute_operation(self, other_access_handle, 
operator):\n other_access = other_access_handle.access\n op = _OPERATORS[operator]\n return self._create_access_path(op(self._obj, other_access._obj))\n\n def get_annotation_name_and_args(self):\n """\n Returns Tuple[Optional[str], Tuple[AccessPath, ...]]\n """\n name = None\n args = ()\n if safe_getattr(self._obj, '__module__', default='') == 'typing':\n m = re.match(r'typing.(\w+)\[', repr(self._obj))\n if m is not None:\n name = m.group(1)\n\n import typing\n if sys.version_info >= (3, 8):\n args = typing.get_args(self._obj)\n else:\n args = safe_getattr(self._obj, '__args__', default=None)\n return name, tuple(self._create_access_path(arg) for arg in args)\n\n def needs_type_completions(self):\n return inspect.isclass(self._obj) and self._obj != type\n\n def _annotation_to_str(self, annotation):\n return inspect.formatannotation(annotation)\n\n def get_signature_params(self):\n return [\n SignatureParam(\n name=p.name,\n has_default=p.default is not p.empty,\n default=self._create_access_path(p.default),\n default_string=repr(p.default),\n has_annotation=p.annotation is not p.empty,\n annotation=self._create_access_path(p.annotation),\n annotation_string=self._annotation_to_str(p.annotation),\n kind_name=str(p.kind)\n ) for p in self._get_signature().parameters.values()\n ]\n\n def _get_signature(self):\n obj = self._obj\n try:\n return inspect.signature(obj)\n except (RuntimeError, TypeError):\n # Reading the code of the function in Python 3.6 implies there are\n # at least these errors that might occur if something is wrong with\n # the signature. 
In that case we just want a simple escape for now.\n raise ValueError\n\n def get_return_annotation(self) -> Optional[AccessPath]:\n try:\n o = self._obj.__annotations__.get('return')\n except AttributeError:\n return None\n\n if o is None:\n return None\n\n try:\n o = typing.get_type_hints(self._obj).get('return')\n except Exception:\n pass\n\n return self._create_access_path(o)\n\n def negate(self):\n return self._create_access_path(-self._obj)\n\n def get_dir_infos(self):\n """\n Used to return a couple of infos that are needed when accessing the sub\n objects of an objects\n """\n tuples = dict(\n (name, self.is_allowed_getattr(name))\n for name in self.dir()\n )\n return self.needs_type_completions(), tuples\n\n\ndef _is_class_instance(obj):\n """Like inspect.* methods."""\n try:\n cls = obj.__class__\n except AttributeError:\n return False\n else:\n # The isinstance check for cls is just there so issubclass doesn't\n # raise an exception.\n return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)\n
|
.venv\Lib\site-packages\jedi\inference\compiled\access.py
|
access.py
|
Python
| 18,914 | 0.95 | 0.254448 | 0.128692 |
react-lib
| 651 |
2023-12-15T00:49:40.640727
|
BSD-3-Clause
| false |
ddfa3c12b2cf63d21f1f2fcac0ae3a1b
|
"""\nA static version of getattr.\nThis is a backport of the Python 3 code with a little bit of additional\ninformation returned to enable Jedi to make decisions.\n"""\n\nimport types\n\nfrom jedi import debug\n\n_sentinel = object()\n\n\ndef _check_instance(obj, attr):\n instance_dict = {}\n try:\n instance_dict = object.__getattribute__(obj, "__dict__")\n except AttributeError:\n pass\n return dict.get(instance_dict, attr, _sentinel)\n\n\ndef _check_class(klass, attr):\n for entry in _static_getmro(klass):\n if _shadowed_dict(type(entry)) is _sentinel:\n try:\n return entry.__dict__[attr]\n except KeyError:\n pass\n return _sentinel\n\n\ndef _is_type(obj):\n try:\n _static_getmro(obj)\n except TypeError:\n return False\n return True\n\n\ndef _shadowed_dict(klass):\n dict_attr = type.__dict__["__dict__"]\n for entry in _static_getmro(klass):\n try:\n class_dict = dict_attr.__get__(entry)["__dict__"]\n except KeyError:\n pass\n else:\n if not (type(class_dict) is types.GetSetDescriptorType\n and class_dict.__name__ == "__dict__"\n and class_dict.__objclass__ is entry):\n return class_dict\n return _sentinel\n\n\ndef _static_getmro(klass):\n mro = type.__dict__['__mro__'].__get__(klass)\n if not isinstance(mro, (tuple, list)):\n # There are unfortunately no tests for this, I was not able to\n # reproduce this in pure Python. 
However should still solve the issue\n # raised in GH #1517.\n debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))\n return ()\n return mro\n\n\ndef _safe_hasattr(obj, name):\n return _check_class(type(obj), name) is not _sentinel\n\n\ndef _safe_is_data_descriptor(obj):\n return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')\n\n\ndef getattr_static(obj, attr, default=_sentinel):\n """Retrieve attributes without triggering dynamic lookup via the\n descriptor protocol, __getattr__ or __getattribute__.\n\n Note: this function may not be able to retrieve all attributes\n that getattr can fetch (like dynamically created attributes)\n and may find attributes that getattr can't (like descriptors\n that raise AttributeError). It can also return descriptor objects\n instead of instance members in some cases. See the\n documentation for details.\n\n Returns a tuple `(attr, is_get_descriptor)`. is_get_descripter means that\n the attribute is a descriptor that has a `__get__` attribute.\n """\n instance_result = _sentinel\n if not _is_type(obj):\n klass = type(obj)\n dict_attr = _shadowed_dict(klass)\n if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):\n instance_result = _check_instance(obj, attr)\n else:\n klass = obj\n\n klass_result = _check_class(klass, attr)\n\n if instance_result is not _sentinel and klass_result is not _sentinel:\n if _safe_hasattr(klass_result, '__get__') \\n and _safe_is_data_descriptor(klass_result):\n # A get/set descriptor has priority over everything.\n return klass_result, True\n\n if instance_result is not _sentinel:\n return instance_result, False\n if klass_result is not _sentinel:\n return klass_result, _safe_hasattr(klass_result, '__get__')\n\n if obj is klass:\n # for types we check the metaclass too\n for entry in _static_getmro(type(klass)):\n if _shadowed_dict(type(entry)) is _sentinel:\n try:\n return entry.__dict__[attr], False\n except KeyError:\n pass\n if default is 
not _sentinel:\n return default, False\n raise AttributeError(attr)\n
|
.venv\Lib\site-packages\jedi\inference\compiled\getattr_static.py
|
getattr_static.py
|
Python
| 3,862 | 0.95 | 0.264463 | 0.052083 |
node-utils
| 881 |
2023-11-24T06:37:41.236573
|
BSD-3-Clause
| false |
b1ae9eaac0d8e2c94b2de9bed8aaf01d
|
# This file also re-exports symbols for wider use. We configure mypy and flake8\n# to be aware that this file does this.\n\nfrom jedi.inference.compiled.value import CompiledValue, CompiledName, \\n CompiledValueFilter, CompiledValueName, create_from_access_path\nfrom jedi.inference.base_value import LazyValueWrapper\n\n\ndef builtin_from_name(inference_state, string):\n typing_builtins_module = inference_state.builtins_module\n if string in ('None', 'True', 'False'):\n builtins, = typing_builtins_module.non_stub_value_set\n filter_ = next(builtins.get_filters())\n else:\n filter_ = next(typing_builtins_module.get_filters())\n name, = filter_.get(string)\n value, = name.infer()\n return value\n\n\nclass ExactValue(LazyValueWrapper):\n """\n This class represents exact values, that makes operations like additions\n and exact boolean values possible, while still being a "normal" stub.\n """\n def __init__(self, compiled_value):\n self.inference_state = compiled_value.inference_state\n self._compiled_value = compiled_value\n\n def __getattribute__(self, name):\n if name in ('get_safe_value', 'execute_operation', 'access_handle',\n 'negate', 'py__bool__', 'is_compiled'):\n return getattr(self._compiled_value, name)\n return super().__getattribute__(name)\n\n def _get_wrapped_value(self):\n instance, = builtin_from_name(\n self.inference_state, self._compiled_value.name.string_name).execute_with_values()\n return instance\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)\n\n\ndef create_simple_object(inference_state, obj):\n """\n Only allows creations of objects that are easily picklable across Python\n versions.\n """\n assert type(obj) in (int, float, str, bytes, slice, complex, bool), repr(obj)\n compiled_value = create_from_access_path(\n inference_state,\n inference_state.compiled_subprocess.create_simple_object(obj)\n )\n return ExactValue(compiled_value)\n\n\ndef get_string_value_set(inference_state):\n return 
builtin_from_name(inference_state, 'str').execute_with_values()\n\n\ndef load_module(inference_state, dotted_name, **kwargs):\n # Temporary, some tensorflow builtins cannot be loaded, so it's tried again\n # and again and it's really slow.\n if dotted_name.startswith('tensorflow.'):\n return None\n access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)\n if access_path is None:\n return None\n return create_from_access_path(inference_state, access_path)\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__init__.py
|
__init__.py
|
Python
| 2,651 | 0.95 | 0.228571 | 0.071429 |
vue-tools
| 648 |
2025-05-25T00:05:18.942688
|
Apache-2.0
| false |
2852d1338154685ad3f278a41cad22ab
|
import os\nimport sys\nfrom importlib.abc import MetaPathFinder\nfrom importlib.machinery import PathFinder\n\n# Remove the first entry, because it's simply a directory entry that equals\n# this directory.\ndel sys.path[0]\n\n\ndef _get_paths():\n # Get the path to jedi.\n _d = os.path.dirname\n _jedi_path = _d(_d(_d(_d(_d(__file__)))))\n _parso_path = sys.argv[1]\n # The paths are the directory that jedi and parso lie in.\n return {'jedi': _jedi_path, 'parso': _parso_path}\n\n\nclass _ExactImporter(MetaPathFinder):\n def __init__(self, path_dct):\n self._path_dct = path_dct\n\n def find_spec(self, fullname, path=None, target=None):\n if path is None and fullname in self._path_dct:\n p = self._path_dct[fullname]\n spec = PathFinder.find_spec(fullname, path=[p], target=target)\n return spec\n return None\n\n\n# Try to import jedi/parso.\nsys.meta_path.insert(0, _ExactImporter(_get_paths()))\nfrom jedi.inference.compiled import subprocess # noqa: E402\nsys.meta_path.pop(0)\n\n# Retrieve the pickle protocol.\nhost_sys_version = [int(x) for x in sys.argv[2].split('.')]\n# And finally start the client.\nsubprocess.Listener().listen()\n
|
.venv\Lib\site-packages\jedi\inference\compiled\subprocess\__main__.py
|
__main__.py
|
Python
| 1,187 | 0.95 | 0.15 | 0.225806 |
python-kit
| 842 |
2023-12-12T04:00:02.911016
|
GPL-3.0
| false |
8f4dfaf2e3acc609f39e91aab40ccfc0
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\subprocess\__pycache__\functions.cpython-313.pyc
|
functions.cpython-313.pyc
|
Other
| 10,658 | 0.95 | 0.102273 | 0 |
python-kit
| 352 |
2024-09-20T20:04:51.411328
|
GPL-3.0
| false |
aa96f980808bd48d0e581cef56acff36
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\subprocess\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 24,140 | 0.95 | 0.087649 | 0.047009 |
react-lib
| 945 |
2024-05-25T20:36:05.021037
|
GPL-3.0
| false |
73339325100035ab0bc77a11c31a47e2
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\subprocess\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 2,159 | 0.85 | 0 | 0 |
node-utils
| 165 |
2023-10-19T03:31:34.082971
|
MIT
| false |
d45a4840dbf53a3d8fdb002aa98a6ed9
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__pycache__\access.cpython-313.pyc
|
access.cpython-313.pyc
|
Other
| 27,485 | 0.95 | 0.005556 | 0.017241 |
python-kit
| 200 |
2024-02-03T07:23:20.080177
|
GPL-3.0
| false |
63abfcabc48845acca590e5b11aedf93
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__pycache__\getattr_static.cpython-313.pyc
|
getattr_static.cpython-313.pyc
|
Other
| 4,924 | 0.95 | 0.031746 | 0.034483 |
vue-tools
| 514 |
2023-09-04T16:09:11.422974
|
Apache-2.0
| false |
122d3517a3643031c798a2e5a20b1aae
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__pycache__\mixed.cpython-313.pyc
|
mixed.cpython-313.pyc
|
Other
| 14,158 | 0.95 | 0.022059 | 0.007692 |
node-utils
| 847 |
2025-06-21T02:41:12.646076
|
Apache-2.0
| false |
e63e7052d22d2404ec1b660d095b779a
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__pycache__\value.cpython-313.pyc
|
value.cpython-313.pyc
|
Other
| 33,362 | 0.95 | 0.019324 | 0.009852 |
python-kit
| 998 |
2023-11-22T21:06:12.665261
|
BSD-3-Clause
| false |
c3e8dcc69438918b00fbc5b6ccab1121
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\compiled\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 4,018 | 0.95 | 0.071429 | 0 |
react-lib
| 499 |
2025-01-11T00:21:38.716294
|
GPL-3.0
| false |
d7a99b3a30be1c641f7c9ddb53029c24
|
from jedi import debug\nfrom jedi.inference.base_value import ValueSet, \\n NO_VALUES\nfrom jedi.inference.utils import to_list\nfrom jedi.inference.gradual.stub_value import StubModuleValue\nfrom jedi.inference.gradual.typeshed import try_to_load_stub_cached\nfrom jedi.inference.value.decorator import Decoratee\n\n\ndef _stub_to_python_value_set(stub_value, ignore_compiled=False):\n stub_module_context = stub_value.get_root_context()\n if not stub_module_context.is_stub():\n return ValueSet([stub_value])\n\n decorates = None\n if isinstance(stub_value, Decoratee):\n decorates = stub_value._original_value\n\n was_instance = stub_value.is_instance()\n if was_instance:\n arguments = getattr(stub_value, '_arguments', None)\n stub_value = stub_value.py__class__()\n\n qualified_names = stub_value.get_qualified_names()\n if qualified_names is None:\n return NO_VALUES\n\n was_bound_method = stub_value.is_bound_method()\n if was_bound_method:\n # Infer the object first. We can infer the method later.\n method_name = qualified_names[-1]\n qualified_names = qualified_names[:-1]\n was_instance = True\n arguments = None\n\n values = _infer_from_stub(stub_module_context, qualified_names, ignore_compiled)\n if was_instance:\n values = ValueSet.from_sets(\n c.execute_with_values() if arguments is None else c.execute(arguments)\n for c in values\n if c.is_class()\n )\n if was_bound_method:\n # Now that the instance has been properly created, we can simply get\n # the method.\n values = values.py__getattribute__(method_name)\n if decorates is not None:\n values = ValueSet(Decoratee(v, decorates) for v in values)\n return values\n\n\ndef _infer_from_stub(stub_module_context, qualified_names, ignore_compiled):\n from jedi.inference.compiled.mixed import MixedObject\n stub_module = stub_module_context.get_value()\n assert isinstance(stub_module, (StubModuleValue, MixedObject)), stub_module_context\n non_stubs = stub_module.non_stub_value_set\n if ignore_compiled:\n non_stubs = 
non_stubs.filter(lambda c: not c.is_compiled())\n for name in qualified_names:\n non_stubs = non_stubs.py__getattribute__(name)\n return non_stubs\n\n\n@to_list\ndef _try_stub_to_python_names(names, prefer_stub_to_compiled=False):\n for name in names:\n module_context = name.get_root_context()\n if not module_context.is_stub():\n yield name\n continue\n\n if name.api_type == 'module':\n values = convert_values(name.infer(), ignore_compiled=prefer_stub_to_compiled)\n if values:\n for v in values:\n yield v.name\n continue\n else:\n v = name.get_defining_qualified_value()\n if v is not None:\n converted = _stub_to_python_value_set(v, ignore_compiled=prefer_stub_to_compiled)\n if converted:\n converted_names = converted.goto(name.get_public_name())\n if converted_names:\n for n in converted_names:\n if n.get_root_context().is_stub():\n # If it's a stub again, it means we're going in\n # a circle. Probably some imports make it a\n # stub again.\n yield name\n else:\n yield n\n continue\n yield name\n\n\ndef _load_stub_module(module):\n if module.is_stub():\n return module\n return try_to_load_stub_cached(\n module.inference_state,\n import_names=module.string_names,\n python_value_set=ValueSet([module]),\n parent_module_value=None,\n sys_path=module.inference_state.get_sys_path(),\n )\n\n\n@to_list\ndef _python_to_stub_names(names, fallback_to_python=False):\n for name in names:\n module_context = name.get_root_context()\n if module_context.is_stub():\n yield name\n continue\n\n if name.api_type == 'module':\n found_name = False\n for n in name.goto():\n if n.api_type == 'module':\n values = convert_values(n.infer(), only_stubs=True)\n for v in values:\n yield v.name\n found_name = True\n else:\n for x in _python_to_stub_names([n], fallback_to_python=fallback_to_python):\n yield x\n found_name = True\n if found_name:\n continue\n else:\n v = name.get_defining_qualified_value()\n if v is not None:\n converted = to_stub(v)\n if converted:\n converted_names = 
converted.goto(name.get_public_name())\n if converted_names:\n yield from converted_names\n continue\n if fallback_to_python:\n # This is the part where if we haven't found anything, just return\n # the stub name.\n yield name\n\n\ndef convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True):\n if only_stubs and prefer_stubs:\n raise ValueError("You cannot use both of only_stubs and prefer_stubs.")\n\n with debug.increase_indent_cm('convert names'):\n if only_stubs or prefer_stubs:\n return _python_to_stub_names(names, fallback_to_python=prefer_stubs)\n else:\n return _try_stub_to_python_names(\n names, prefer_stub_to_compiled=prefer_stub_to_compiled)\n\n\ndef convert_values(values, only_stubs=False, prefer_stubs=False, ignore_compiled=True):\n assert not (only_stubs and prefer_stubs)\n with debug.increase_indent_cm('convert values'):\n if only_stubs or prefer_stubs:\n return ValueSet.from_sets(\n to_stub(value)\n or (ValueSet({value}) if prefer_stubs else NO_VALUES)\n for value in values\n )\n else:\n return ValueSet.from_sets(\n _stub_to_python_value_set(stub_value, ignore_compiled=ignore_compiled)\n or ValueSet({stub_value})\n for stub_value in values\n )\n\n\ndef to_stub(value):\n if value.is_stub():\n return ValueSet([value])\n\n was_instance = value.is_instance()\n if was_instance:\n value = value.py__class__()\n\n qualified_names = value.get_qualified_names()\n stub_module = _load_stub_module(value.get_root_context().get_value())\n if stub_module is None or qualified_names is None:\n return NO_VALUES\n\n was_bound_method = value.is_bound_method()\n if was_bound_method:\n # Infer the object first. 
We can infer the method later.\n method_name = qualified_names[-1]\n qualified_names = qualified_names[:-1]\n was_instance = True\n\n stub_values = ValueSet([stub_module])\n for name in qualified_names:\n stub_values = stub_values.py__getattribute__(name)\n\n if was_instance:\n stub_values = ValueSet.from_sets(\n c.execute_with_values()\n for c in stub_values\n if c.is_class()\n )\n if was_bound_method:\n # Now that the instance has been properly created, we can simply get\n # the method.\n stub_values = stub_values.py__getattribute__(method_name)\n return stub_values\n
|
.venv\Lib\site-packages\jedi\inference\gradual\conversion.py
|
conversion.py
|
Python
| 7,601 | 0.95 | 0.291866 | 0.061111 |
vue-tools
| 937 |
2024-06-11T03:26:48.769316
|
BSD-3-Clause
| false |
7898deed04aaa34636d1cc362eba869c
|
"""\nThis module is about generics, like the `int` in `List[int]`. It's not about\nthe Generic class.\n"""\n\nfrom jedi import debug\nfrom jedi.cache import memoize_method\nfrom jedi.inference.utils import to_tuple\nfrom jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.inference.value.iterable import SequenceLiteralValue\nfrom jedi.inference.helpers import is_string\n\n\ndef _resolve_forward_references(context, value_set):\n for value in value_set:\n if is_string(value):\n from jedi.inference.gradual.annotation import _get_forward_reference_node\n node = _get_forward_reference_node(context, value.get_safe_value())\n if node is not None:\n for c in context.infer_node(node):\n yield c\n else:\n yield value\n\n\nclass _AbstractGenericManager:\n def get_index_and_execute(self, index):\n try:\n return self[index].execute_annotation()\n except IndexError:\n debug.warning('No param #%s found for annotation %s', index, self)\n return NO_VALUES\n\n def get_type_hint(self):\n return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())\n\n\nclass LazyGenericManager(_AbstractGenericManager):\n def __init__(self, context_of_index, index_value):\n self._context_of_index = context_of_index\n self._index_value = index_value\n\n @memoize_method\n def __getitem__(self, index):\n return self._tuple()[index]()\n\n def __len__(self):\n return len(self._tuple())\n\n @memoize_method\n @to_tuple\n def _tuple(self):\n def lambda_scoping_in_for_loop_sucks(lazy_value):\n return lambda: ValueSet(_resolve_forward_references(\n self._context_of_index,\n lazy_value.infer()\n ))\n\n if isinstance(self._index_value, SequenceLiteralValue):\n for lazy_value in self._index_value.py__iter__(contextualized_node=None):\n yield lambda_scoping_in_for_loop_sucks(lazy_value)\n else:\n yield lambda: ValueSet(_resolve_forward_references(\n self._context_of_index,\n ValueSet([self._index_value])\n ))\n\n @to_tuple\n def to_tuple(self):\n for callable_ in 
self._tuple():\n yield callable_()\n\n def is_homogenous_tuple(self):\n if isinstance(self._index_value, SequenceLiteralValue):\n entries = self._index_value.get_tree_entries()\n if len(entries) == 2 and entries[1] == '...':\n return True\n return False\n\n def __repr__(self):\n return '<LazyG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))\n\n\nclass TupleGenericManager(_AbstractGenericManager):\n def __init__(self, tup):\n self._tuple = tup\n\n def __getitem__(self, index):\n return self._tuple[index]\n\n def __len__(self):\n return len(self._tuple)\n\n def to_tuple(self):\n return self._tuple\n\n def is_homogenous_tuple(self):\n return False\n\n def __repr__(self):\n return '<TupG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))\n
|
.venv\Lib\site-packages\jedi\inference\gradual\generics.py
|
generics.py
|
Python
| 3,144 | 0.95 | 0.346535 | 0 |
node-utils
| 317 |
2024-06-25T19:10:00.599653
|
GPL-3.0
| false |
22b798aef38af52039818b7db53964f6
|
from jedi.inference.base_value import ValueWrapper\nfrom jedi.inference.value.module import ModuleValue\nfrom jedi.inference.filters import ParserTreeFilter\nfrom jedi.inference.names import StubName, StubModuleName\nfrom jedi.inference.gradual.typing import TypingModuleFilterWrapper\nfrom jedi.inference.context import ModuleContext\n\n\nclass StubModuleValue(ModuleValue):\n _module_name_class = StubModuleName\n\n def __init__(self, non_stub_value_set, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.non_stub_value_set = non_stub_value_set\n\n def is_stub(self):\n return True\n\n def sub_modules_dict(self):\n """\n We have to overwrite this, because it's possible to have stubs that\n don't have code for all the child modules. At the time of writing this\n there are for example no stubs for `json.tool`.\n """\n names = {}\n for value in self.non_stub_value_set:\n try:\n method = value.sub_modules_dict\n except AttributeError:\n pass\n else:\n names.update(method())\n names.update(super().sub_modules_dict())\n return names\n\n def _get_stub_filters(self, origin_scope):\n return [StubFilter(\n parent_context=self.as_context(),\n origin_scope=origin_scope\n )] + list(self.iter_star_filters())\n\n def get_filters(self, origin_scope=None):\n filters = super().get_filters(origin_scope)\n next(filters, None) # Ignore the first filter and replace it with our own\n stub_filters = self._get_stub_filters(origin_scope=origin_scope)\n yield from stub_filters\n yield from filters\n\n def _as_context(self):\n return StubModuleContext(self)\n\n\nclass StubModuleContext(ModuleContext):\n def get_filters(self, until_position=None, origin_scope=None):\n # Make sure to ignore the position, because positions are not relevant\n # for stubs.\n return super().get_filters(origin_scope=origin_scope)\n\n\nclass TypingModuleWrapper(StubModuleValue):\n def get_filters(self, *args, **kwargs):\n filters = super().get_filters(*args, **kwargs)\n f = next(filters, None)\n assert f is not 
None\n yield TypingModuleFilterWrapper(f)\n yield from filters\n\n def _as_context(self):\n return TypingModuleContext(self)\n\n\nclass TypingModuleContext(ModuleContext):\n def get_filters(self, *args, **kwargs):\n filters = super().get_filters(*args, **kwargs)\n yield TypingModuleFilterWrapper(next(filters, None))\n yield from filters\n\n\nclass StubFilter(ParserTreeFilter):\n name_class = StubName\n\n def _is_name_reachable(self, name):\n if not super()._is_name_reachable(name):\n return False\n\n # Imports in stub files are only public if they have an "as"\n # export.\n definition = name.get_definition()\n if definition is None:\n return False\n if definition.type in ('import_from', 'import_name'):\n if name.parent.type not in ('import_as_name', 'dotted_as_name'):\n return False\n n = name.value\n # TODO rewrite direct return\n if n.startswith('_') and not (n.startswith('__') and n.endswith('__')):\n return False\n return True\n\n\nclass VersionInfo(ValueWrapper):\n pass\n
|
.venv\Lib\site-packages\jedi\inference\gradual\stub_value.py
|
stub_value.py
|
Python
| 3,385 | 0.95 | 0.284314 | 0.061728 |
vue-tools
| 597 |
2024-02-01T21:37:45.500355
|
GPL-3.0
| false |
4224089e88943e931e754fea95708f11
|
import os\nimport re\nfrom functools import wraps\nfrom collections import namedtuple\nfrom typing import Dict, Mapping, Tuple\nfrom pathlib import Path\n\nfrom jedi import settings\nfrom jedi.file_io import FileIO\nfrom jedi.parser_utils import get_cached_code_lines\nfrom jedi.inference.base_value import ValueSet, NO_VALUES\nfrom jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue\nfrom jedi.inference.value import ModuleValue\n\n_jedi_path = Path(__file__).parent.parent.parent\nTYPESHED_PATH = _jedi_path.joinpath('third_party', 'typeshed')\nDJANGO_INIT_PATH = _jedi_path.joinpath('third_party', 'django-stubs',\n 'django-stubs', '__init__.pyi')\n\n_IMPORT_MAP = dict(\n _collections='collections',\n _socket='socket',\n)\n\nPathInfo = namedtuple('PathInfo', 'path is_third_party')\n\n\ndef _merge_create_stub_map(path_infos):\n map_ = {}\n for directory_path_info in path_infos:\n map_.update(_create_stub_map(directory_path_info))\n return map_\n\n\ndef _create_stub_map(directory_path_info):\n """\n Create a mapping of an importable name in Python to a stub file.\n """\n def generate():\n try:\n listed = os.listdir(directory_path_info.path)\n except (FileNotFoundError, NotADirectoryError):\n return\n\n for entry in listed:\n path = os.path.join(directory_path_info.path, entry)\n if os.path.isdir(path):\n init = os.path.join(path, '__init__.pyi')\n if os.path.isfile(init):\n yield entry, PathInfo(init, directory_path_info.is_third_party)\n elif entry.endswith('.pyi') and os.path.isfile(path):\n name = entry[:-4]\n if name != '__init__':\n yield name, PathInfo(path, directory_path_info.is_third_party)\n\n # Create a dictionary from the tuple generator.\n return dict(generate())\n\n\ndef _get_typeshed_directories(version_info):\n check_version_list = ['2and3', '3']\n for base in ['stdlib', 'third_party']:\n base_path = TYPESHED_PATH.joinpath(base)\n base_list = os.listdir(base_path)\n for base_list_entry in base_list:\n match = 
re.match(r'(\d+)\.(\d+)$', base_list_entry)\n if match is not None:\n if match.group(1) == '3' and int(match.group(2)) <= version_info.minor:\n check_version_list.append(base_list_entry)\n\n for check_version in check_version_list:\n is_third_party = base != 'stdlib'\n yield PathInfo(str(base_path.joinpath(check_version)), is_third_party)\n\n\n_version_cache: Dict[Tuple[int, int], Mapping[str, PathInfo]] = {}\n\n\ndef _cache_stub_file_map(version_info):\n """\n Returns a map of an importable name in Python to a stub file.\n """\n # TODO this caches the stub files indefinitely, maybe use a time cache\n # for that?\n version = version_info[:2]\n try:\n return _version_cache[version]\n except KeyError:\n pass\n\n _version_cache[version] = file_set = \\n _merge_create_stub_map(_get_typeshed_directories(version_info))\n return file_set\n\n\ndef import_module_decorator(func):\n @wraps(func)\n def wrapper(inference_state, import_names, parent_module_value, sys_path, prefer_stubs):\n python_value_set = inference_state.module_cache.get(import_names)\n if python_value_set is None:\n if parent_module_value is not None and parent_module_value.is_stub():\n parent_module_values = parent_module_value.non_stub_value_set\n else:\n parent_module_values = [parent_module_value]\n if import_names == ('os', 'path'):\n # This is a huge exception, we follow a nested import\n # ``os.path``, because it's a very important one in Python\n # that is being achieved by messing with ``sys.modules`` in\n # ``os``.\n python_value_set = ValueSet.from_sets(\n func(inference_state, (n,), None, sys_path,)\n for n in ['posixpath', 'ntpath', 'macpath', 'os2emxpath']\n )\n else:\n python_value_set = ValueSet.from_sets(\n func(inference_state, import_names, p, sys_path,)\n for p in parent_module_values\n )\n inference_state.module_cache.add(import_names, python_value_set)\n\n if not prefer_stubs or import_names[0] in settings.auto_import_modules:\n return python_value_set\n\n stub = 
try_to_load_stub_cached(inference_state, import_names, python_value_set,\n parent_module_value, sys_path)\n if stub is not None:\n return ValueSet([stub])\n return python_value_set\n\n return wrapper\n\n\ndef try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):\n if import_names is None:\n return None\n\n try:\n return inference_state.stub_module_cache[import_names]\n except KeyError:\n pass\n\n # TODO is this needed? where are the exceptions coming from that make this\n # necessary? Just remove this line.\n inference_state.stub_module_cache[import_names] = None\n inference_state.stub_module_cache[import_names] = result = \\n _try_to_load_stub(inference_state, import_names, *args, **kwargs)\n return result\n\n\ndef _try_to_load_stub(inference_state, import_names, python_value_set,\n parent_module_value, sys_path):\n """\n Trying to load a stub for a set of import_names.\n\n This is modelled to work like "PEP 561 -- Distributing and Packaging Type\n Information", see https://www.python.org/dev/peps/pep-0561.\n """\n if parent_module_value is None and len(import_names) > 1:\n try:\n parent_module_value = try_to_load_stub_cached(\n inference_state, import_names[:-1], NO_VALUES,\n parent_module_value=None, sys_path=sys_path)\n except KeyError:\n pass\n\n # 1. Try to load foo-stubs folders on path for import name foo.\n if len(import_names) == 1:\n # foo-stubs\n for p in sys_path:\n init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'\n m = _try_to_load_stub_from_file(\n inference_state,\n python_value_set,\n file_io=FileIO(init),\n import_names=import_names,\n )\n if m is not None:\n return m\n if import_names[0] == 'django' and python_value_set:\n return _try_to_load_stub_from_file(\n inference_state,\n python_value_set,\n file_io=FileIO(str(DJANGO_INIT_PATH)),\n import_names=import_names,\n )\n\n # 2. 
Try to load pyi files next to py files.\n for c in python_value_set:\n try:\n method = c.py__file__\n except AttributeError:\n pass\n else:\n file_path = method()\n file_paths = []\n if c.is_namespace():\n file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()]\n elif file_path is not None and file_path.suffix == '.py':\n file_paths = [str(file_path) + 'i']\n\n for file_path in file_paths:\n m = _try_to_load_stub_from_file(\n inference_state,\n python_value_set,\n # The file path should end with .pyi\n file_io=FileIO(file_path),\n import_names=import_names,\n )\n if m is not None:\n return m\n\n # 3. Try to load typeshed\n m = _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names)\n if m is not None:\n return m\n\n # 4. Try to load pyi file somewhere if python_value_set was not defined.\n if not python_value_set:\n if parent_module_value is not None:\n check_path = parent_module_value.py__path__() or []\n # In case import_names\n names_for_path = (import_names[-1],)\n else:\n check_path = sys_path\n names_for_path = import_names\n\n for p in check_path:\n m = _try_to_load_stub_from_file(\n inference_state,\n python_value_set,\n file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'),\n import_names=import_names,\n )\n if m is not None:\n return m\n\n # If no stub is found, that's fine, the calling function has to deal with\n # it.\n return None\n\n\ndef _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names):\n import_name = import_names[-1]\n map_ = None\n if len(import_names) == 1:\n map_ = _cache_stub_file_map(inference_state.grammar.version_info)\n import_name = _IMPORT_MAP.get(import_name, import_name)\n elif isinstance(parent_module_value, ModuleValue):\n if not parent_module_value.is_package():\n # Only if it's a package (= a folder) something can be\n # imported.\n return None\n paths = parent_module_value.py__path__()\n # Once the initial package has been loaded, the sub 
packages will\n # always be loaded, regardless if they are there or not. This makes\n # sense, IMO, because stubs take preference, even if the original\n # library doesn't provide a module (it could be dynamic). ~dave\n map_ = _merge_create_stub_map([PathInfo(p, is_third_party=False) for p in paths])\n\n if map_ is not None:\n path_info = map_.get(import_name)\n if path_info is not None and (not path_info.is_third_party or python_value_set):\n return _try_to_load_stub_from_file(\n inference_state,\n python_value_set,\n file_io=FileIO(path_info.path),\n import_names=import_names,\n )\n\n\ndef _try_to_load_stub_from_file(inference_state, python_value_set, file_io, import_names):\n try:\n stub_module_node = parse_stub_module(inference_state, file_io)\n except OSError:\n # The file that you're looking for doesn't exist (anymore).\n return None\n else:\n return create_stub_module(\n inference_state, inference_state.latest_grammar, python_value_set,\n stub_module_node, file_io, import_names\n )\n\n\ndef parse_stub_module(inference_state, file_io):\n return inference_state.parse(\n file_io=file_io,\n cache=True,\n diff_cache=settings.fast_parser,\n cache_path=settings.cache_directory,\n use_latest_grammar=True\n )\n\n\ndef create_stub_module(inference_state, grammar, python_value_set,\n stub_module_node, file_io, import_names):\n if import_names == ('typing',):\n module_cls = TypingModuleWrapper\n else:\n module_cls = StubModuleValue\n file_name = os.path.basename(file_io.path)\n stub_module_value = module_cls(\n python_value_set, inference_state, stub_module_node,\n file_io=file_io,\n string_names=import_names,\n # The code was loaded with latest_grammar, so use\n # that.\n code_lines=get_cached_code_lines(grammar, file_io.path),\n is_package=file_name == '__init__.pyi',\n )\n return stub_module_value\n
|
.venv\Lib\site-packages\jedi\inference\gradual\typeshed.py
|
typeshed.py
|
Python
| 11,467 | 0.95 | 0.216129 | 0.102273 |
node-utils
| 729 |
2023-07-22T06:35:23.669392
|
BSD-3-Clause
| false |
0e545245963f63db95f80f7c2e46e2d9
|
from pathlib import Path\n\nfrom jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module\n\n\ndef load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node):\n """\n This function is given a random .pyi file and should return the proper\n module.\n """\n path = file_io.path\n path = Path(path)\n assert path.suffix == '.pyi'\n try:\n relative_path = path.relative_to(TYPESHED_PATH)\n except ValueError:\n pass\n else:\n # /[...]/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__\n rest = relative_path.with_suffix('')\n # Remove the stdlib/3 or third_party/3.6 part\n import_names = rest.parts[2:]\n if rest.name == '__init__':\n import_names = import_names[:-1]\n\n if import_names is not None:\n actual_value_set = inference_state.import_module(import_names, prefer_stubs=False)\n\n stub = create_stub_module(\n inference_state, grammar, actual_value_set,\n module_node, file_io, import_names\n )\n inference_state.stub_module_cache[import_names] = stub\n return stub\n return None\n
|
.venv\Lib\site-packages\jedi\inference\gradual\utils.py
|
utils.py
|
Python
| 1,147 | 0.95 | 0.142857 | 0.066667 |
react-lib
| 894 |
2024-10-31T02:31:48.049637
|
GPL-3.0
| false |
37b97de3634ab1c86dd5f8ae221399ec
|
"""\nIt is unfortunately not well documented how stubs and annotations work in Jedi.\nIf somebody needs an introduction, please let me know.\n"""\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__init__.py
|
__init__.py
|
Python
| 143 | 0.7 | 0 | 0 |
python-kit
| 294 |
2024-09-03T02:40:44.344418
|
GPL-3.0
| false |
9cef0a1f3b15596c2f03c75b986283e9
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\annotation.cpython-313.pyc
|
annotation.cpython-313.pyc
|
Other
| 19,044 | 0.95 | 0.092308 | 0.005988 |
react-lib
| 266 |
2025-06-30T05:25:39.645692
|
MIT
| false |
19c911dea0886afc35291a82d3e62e7c
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\base.cpython-313.pyc
|
base.cpython-313.pyc
|
Other
| 23,209 | 0.95 | 0.06338 | 0 |
react-lib
| 824 |
2024-08-08T02:58:48.050051
|
GPL-3.0
| false |
a8a549b72843610d795e3d2e0b4f433b
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\conversion.cpython-313.pyc
|
conversion.cpython-313.pyc
|
Other
| 9,188 | 0.95 | 0 | 0 |
react-lib
| 168 |
2024-09-22T20:39:58.064657
|
GPL-3.0
| false |
b3243d8a9543e2a8920591d8d939dd4a
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\generics.cpython-313.pyc
|
generics.cpython-313.pyc
|
Other
| 7,224 | 0.8 | 0.060606 | 0 |
vue-tools
| 743 |
2025-03-16T11:15:29.505599
|
GPL-3.0
| false |
e25c658a9760e5456a3c9952235100e4
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\stub_value.cpython-313.pyc
|
stub_value.cpython-313.pyc
|
Other
| 5,939 | 0.95 | 0.073171 | 0 |
react-lib
| 6 |
2023-07-22T21:37:55.957356
|
Apache-2.0
| false |
255cf5ab1105a4d8c8a4ff4382b1e261
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\typeshed.cpython-313.pyc
|
typeshed.cpython-313.pyc
|
Other
| 11,756 | 0.95 | 0.009009 | 0 |
python-kit
| 209 |
2025-02-12T09:46:34.502180
|
GPL-3.0
| false |
61462f11001623c93f393a3f2ba12fcc
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\type_var.cpython-313.pyc
|
type_var.cpython-313.pyc
|
Other
| 7,006 | 0.95 | 0.020833 | 0 |
node-utils
| 435 |
2024-08-22T12:19:29.696820
|
GPL-3.0
| false |
2b9fcbf06b23b656fb679bd0d3f6da6e
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\typing.cpython-313.pyc
|
typing.cpython-313.pyc
|
Other
| 25,052 | 0.95 | 0.009852 | 0.005236 |
awesome-app
| 954 |
2023-08-18T08:02:17.035130
|
Apache-2.0
| false |
298af473cebcc16a6df08f7aece7a2c0
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\utils.cpython-313.pyc
|
utils.cpython-313.pyc
|
Other
| 1,393 | 0.95 | 0.052632 | 0 |
node-utils
| 384 |
2024-12-26T07:08:14.451587
|
BSD-3-Clause
| false |
33f8497e6e7e665de9e6453a88ec1888
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\gradual\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 350 | 0.7 | 0 | 0 |
node-utils
| 968 |
2023-07-28T00:27:07.773139
|
BSD-3-Clause
| false |
10b8951f5e12adcb294e792da843742c
|
'''\nDecorators are not really values, however we need some wrappers to improve\ndocstrings and other things around decorators.\n'''\n\nfrom jedi.inference.base_value import ValueWrapper, ValueSet\n\n\nclass Decoratee(ValueWrapper):\n def __init__(self, wrapped_value, original_value):\n super().__init__(wrapped_value)\n self._original_value = original_value\n\n def py__doc__(self):\n return self._original_value.py__doc__()\n\n def py__get__(self, instance, class_value):\n return ValueSet(\n Decoratee(v, self._original_value)\n for v in self._wrapped_value.py__get__(instance, class_value)\n )\n\n def get_signatures(self):\n signatures = self._wrapped_value.get_signatures()\n if signatures:\n return signatures\n # Fallback to signatures of the original function/class if the\n # decorator has no signature or it is not inferrable.\n #\n # __get__ means that it's a descriptor. In that case we don't return\n # signatures, because they are usually properties.\n if not self._wrapped_value.py__getattribute__('__get__'):\n return self._original_value.get_signatures()\n return []\n
|
.venv\Lib\site-packages\jedi\inference\value\decorator.py
|
decorator.py
|
Python
| 1,207 | 0.95 | 0.323529 | 0.178571 |
node-utils
| 727 |
2024-08-13T13:04:26.936828
|
Apache-2.0
| false |
0c00c52ea59ebed04df800841b2be293
|
"""\nA module to deal with stuff like `list.append` and `set.add`.\n\nArray modifications\n*******************\n\nIf the content of an array (``set``/``list``) is requested somewhere, the\ncurrent module will be checked for appearances of ``arr.append``,\n``arr.insert``, etc. If the ``arr`` name points to an actual array, the\ncontent will be added\n\nThis can be really cpu intensive, as you can imagine. Because |jedi| has to\nfollow **every** ``append`` and check whether it's the right array. However this\nworks pretty good, because in *slow* cases, the recursion detector and other\nsettings will stop this process.\n\nIt is important to note that:\n\n1. Array modifications work only in the current module.\n2. Jedi only checks Array additions; ``list.pop``, etc are ignored.\n"""\nfrom jedi import debug\nfrom jedi import settings\nfrom jedi.inference import recursion\nfrom jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \\n ValueWrapper\nfrom jedi.inference.lazy_value import LazyKnownValues\nfrom jedi.inference.helpers import infer_call_of_leaf\nfrom jedi.inference.cache import inference_state_method_cache\n\n_sentinel = object()\n\n\ndef check_array_additions(context, sequence):\n """ Just a mapper function for the internal _internal_check_array_additions """\n if sequence.array_type not in ('list', 'set'):\n # TODO also check for dict updates\n return NO_VALUES\n\n return _internal_check_array_additions(context, sequence)\n\n\n@inference_state_method_cache(default=NO_VALUES)\n@debug.increase_indent\ndef _internal_check_array_additions(context, sequence):\n """\n Checks if a `Array` has "add" (append, insert, extend) statements:\n\n >>> a = [""]\n >>> a.append(1)\n """\n from jedi.inference import arguments\n\n debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')\n module_context = context.get_root_context()\n if not settings.dynamic_array_additions or module_context.is_compiled():\n debug.dbg('Dynamic array search aborted.', 
color='MAGENTA')\n return NO_VALUES\n\n def find_additions(context, arglist, add_name):\n params = list(arguments.TreeArguments(context.inference_state, context, arglist).unpack())\n result = set()\n if add_name in ['insert']:\n params = params[1:]\n if add_name in ['append', 'add', 'insert']:\n for key, lazy_value in params:\n result.add(lazy_value)\n elif add_name in ['extend', 'update']:\n for key, lazy_value in params:\n result |= set(lazy_value.infer().iterate())\n return result\n\n temp_param_add, settings.dynamic_params_for_other_modules = \\n settings.dynamic_params_for_other_modules, False\n\n is_list = sequence.name.string_name == 'list'\n search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])\n\n added_types = set()\n for add_name in search_names:\n try:\n possible_names = module_context.tree_node.get_used_names()[add_name]\n except KeyError:\n continue\n else:\n for name in possible_names:\n value_node = context.tree_node\n if not (value_node.start_pos < name.start_pos < value_node.end_pos):\n continue\n trailer = name.parent\n power = trailer.parent\n trailer_pos = power.children.index(trailer)\n try:\n execution_trailer = power.children[trailer_pos + 1]\n except IndexError:\n continue\n else:\n if execution_trailer.type != 'trailer' \\n or execution_trailer.children[0] != '(' \\n or execution_trailer.children[1] == ')':\n continue\n\n random_context = context.create_context(name)\n\n with recursion.execution_allowed(context.inference_state, power) as allowed:\n if allowed:\n found = infer_call_of_leaf(\n random_context,\n name,\n cut_own_trailer=True\n )\n if sequence in found:\n # The arrays match. 
Now add the results\n added_types |= find_additions(\n random_context,\n execution_trailer.children[1],\n add_name\n )\n\n # reset settings\n settings.dynamic_params_for_other_modules = temp_param_add\n debug.dbg('Dynamic array result %s', added_types, color='MAGENTA')\n return added_types\n\n\ndef get_dynamic_array_instance(instance, arguments):\n """Used for set() and list() instances."""\n ai = _DynamicArrayAdditions(instance, arguments)\n from jedi.inference import arguments\n return arguments.ValuesArguments([ValueSet([ai])])\n\n\nclass _DynamicArrayAdditions(HelperValueMixin):\n """\n Used for the usage of set() and list().\n This is definitely a hack, but a good one :-)\n It makes it possible to use set/list conversions.\n\n This is not a proper context, because it doesn't have to be. It's not used\n in the wild, it's just used within typeshed as an argument to `__init__`\n for set/list and never used in any other place.\n """\n def __init__(self, instance, arguments):\n self._instance = instance\n self._arguments = arguments\n\n def py__class__(self):\n tuple_, = self._instance.inference_state.builtins_module.py__getattribute__('tuple')\n return tuple_\n\n def py__iter__(self, contextualized_node=None):\n arguments = self._arguments\n try:\n _, lazy_value = next(arguments.unpack())\n except StopIteration:\n pass\n else:\n yield from lazy_value.infer().iterate()\n\n from jedi.inference.arguments import TreeArguments\n if isinstance(arguments, TreeArguments):\n additions = _internal_check_array_additions(arguments.context, self._instance)\n yield from additions\n\n def iterate(self, contextualized_node=None, is_async=False):\n return self.py__iter__(contextualized_node)\n\n\nclass _Modification(ValueWrapper):\n def __init__(self, wrapped_value, assigned_values, contextualized_key):\n super().__init__(wrapped_value)\n self._assigned_values = assigned_values\n self._contextualized_key = contextualized_key\n\n def py__getitem__(self, *args, **kwargs):\n return 
self._wrapped_value.py__getitem__(*args, **kwargs) | self._assigned_values\n\n def py__simple_getitem__(self, index):\n actual = [\n v.get_safe_value(_sentinel)\n for v in self._contextualized_key.infer()\n ]\n if index in actual:\n return self._assigned_values\n return self._wrapped_value.py__simple_getitem__(index)\n\n\nclass DictModification(_Modification):\n def py__iter__(self, contextualized_node=None):\n yield from self._wrapped_value.py__iter__(contextualized_node)\n yield self._contextualized_key\n\n def get_key_values(self):\n return self._wrapped_value.get_key_values() | self._contextualized_key.infer()\n\n\nclass ListModification(_Modification):\n def py__iter__(self, contextualized_node=None):\n yield from self._wrapped_value.py__iter__(contextualized_node)\n yield LazyKnownValues(self._assigned_values)\n
|
.venv\Lib\site-packages\jedi\inference\value\dynamic_arrays.py
|
dynamic_arrays.py
|
Python
| 7,527 | 0.95 | 0.23 | 0.024691 |
react-lib
| 770 |
2024-08-10T18:53:19.107648
|
Apache-2.0
| false |
1766a01279879b1b2cbbe4d3bed0d63a
|
"""\nContains all classes and functions to deal with lists, dicts, generators and\niterators in general.\n"""\nfrom jedi.inference import compiled\nfrom jedi.inference import analysis\nfrom jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \\n LazyTreeValue\nfrom jedi.inference.helpers import get_int_or_none, is_string, \\n reraise_getitem_errors, SimpleGetItemNotFound\nfrom jedi.inference.utils import safe_property, to_list\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.filters import LazyAttributeOverwrite, publish_method\nfrom jedi.inference.base_value import ValueSet, Value, NO_VALUES, \\n ContextualizedNode, iterate_values, sentinel, \\n LazyValueWrapper\nfrom jedi.parser_utils import get_sync_comp_fors\nfrom jedi.inference.context import CompForContext\nfrom jedi.inference.value.dynamic_arrays import check_array_additions\n\n\nclass IterableMixin:\n def py__next__(self, contextualized_node=None):\n return self.py__iter__(contextualized_node)\n\n def py__stop_iteration_returns(self):\n return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])\n\n # At the moment, safe values are simple values like "foo", 1 and not\n # lists/dicts. 
Therefore as a small speed optimization we can just do the\n # default instead of resolving the lazy wrapped values, that are just\n # doing this in the end as well.\n # This mostly speeds up patterns like `sys.version_info >= (3, 0)` in\n # typeshed.\n get_safe_value = Value.get_safe_value\n\n\nclass GeneratorBase(LazyAttributeOverwrite, IterableMixin):\n array_type = None\n\n def _get_wrapped_value(self):\n instance, = self._get_cls().execute_annotation()\n return instance\n\n def _get_cls(self):\n generator, = self.inference_state.typing_module.py__getattribute__('Generator')\n return generator\n\n def py__bool__(self):\n return True\n\n @publish_method('__iter__')\n def _iter(self, arguments):\n return ValueSet([self])\n\n @publish_method('send')\n @publish_method('__next__')\n def _next(self, arguments):\n return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())\n\n def py__stop_iteration_returns(self):\n return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])\n\n @property\n def name(self):\n return compiled.CompiledValueName(self, 'Generator')\n\n def get_annotated_class_object(self):\n from jedi.inference.gradual.generics import TupleGenericManager\n gen_values = self.merge_types_of_iterate().py__class__()\n gm = TupleGenericManager((gen_values, NO_VALUES, NO_VALUES))\n return self._get_cls().with_generics(gm)\n\n\nclass Generator(GeneratorBase):\n """Handling of `yield` functions."""\n def __init__(self, inference_state, func_execution_context):\n super().__init__(inference_state)\n self._func_execution_context = func_execution_context\n\n def py__iter__(self, contextualized_node=None):\n iterators = self._func_execution_context.infer_annotations()\n if iterators:\n return iterators.iterate(contextualized_node)\n return self._func_execution_context.get_yield_lazy_values()\n\n def py__stop_iteration_returns(self):\n return self._func_execution_context.get_return_values()\n\n def __repr__(self):\n return "<%s of 
%s>" % (type(self).__name__, self._func_execution_context)\n\n\ndef comprehension_from_atom(inference_state, value, atom):\n bracket = atom.children[0]\n test_list_comp = atom.children[1]\n\n if bracket == '{':\n if atom.children[1].children[1] == ':':\n sync_comp_for = test_list_comp.children[3]\n if sync_comp_for.type == 'comp_for':\n sync_comp_for = sync_comp_for.children[1]\n\n return DictComprehension(\n inference_state,\n value,\n sync_comp_for_node=sync_comp_for,\n key_node=test_list_comp.children[0],\n value_node=test_list_comp.children[2],\n )\n else:\n cls = SetComprehension\n elif bracket == '(':\n cls = GeneratorComprehension\n elif bracket == '[':\n cls = ListComprehension\n\n sync_comp_for = test_list_comp.children[1]\n if sync_comp_for.type == 'comp_for':\n sync_comp_for = sync_comp_for.children[1]\n\n return cls(\n inference_state,\n defining_context=value,\n sync_comp_for_node=sync_comp_for,\n entry_node=test_list_comp.children[0],\n )\n\n\nclass ComprehensionMixin:\n @inference_state_method_cache()\n def _get_comp_for_context(self, parent_context, comp_for):\n return CompForContext(parent_context, comp_for)\n\n def _nested(self, comp_fors, parent_context=None):\n comp_for = comp_fors[0]\n\n is_async = comp_for.parent.type == 'comp_for'\n\n input_node = comp_for.children[3]\n parent_context = parent_context or self._defining_context\n input_types = parent_context.infer_node(input_node)\n\n cn = ContextualizedNode(parent_context, input_node)\n iterated = input_types.iterate(cn, is_async=is_async)\n exprlist = comp_for.children[1]\n for i, lazy_value in enumerate(iterated):\n types = lazy_value.infer()\n dct = unpack_tuple_to_dict(parent_context, types, exprlist)\n context = self._get_comp_for_context(\n parent_context,\n comp_for,\n )\n with context.predefine_names(comp_for, dct):\n try:\n yield from self._nested(comp_fors[1:], context)\n except IndexError:\n iterated = context.infer_node(self._entry_node)\n if self.array_type == 'dict':\n yield 
iterated, context.infer_node(self._value_node)\n else:\n yield iterated\n\n @inference_state_method_cache(default=[])\n @to_list\n def _iterate(self):\n comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))\n yield from self._nested(comp_fors)\n\n def py__iter__(self, contextualized_node=None):\n for set_ in self._iterate():\n yield LazyKnownValues(set_)\n\n def __repr__(self):\n return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node)\n\n\nclass _DictMixin:\n def _get_generics(self):\n return tuple(c_set.py__class__() for c_set in self.get_mapping_item_values())\n\n\nclass Sequence(LazyAttributeOverwrite, IterableMixin):\n api_type = 'instance'\n\n @property\n def name(self):\n return compiled.CompiledValueName(self, self.array_type)\n\n def _get_generics(self):\n return (self.merge_types_of_iterate().py__class__(),)\n\n @inference_state_method_cache(default=())\n def _cached_generics(self):\n return self._get_generics()\n\n def _get_wrapped_value(self):\n from jedi.inference.gradual.base import GenericClass\n from jedi.inference.gradual.generics import TupleGenericManager\n klass = compiled.builtin_from_name(self.inference_state, self.array_type)\n c, = GenericClass(\n klass,\n TupleGenericManager(self._cached_generics())\n ).execute_annotation()\n return c\n\n def py__bool__(self):\n return None # We don't know the length, because of appends.\n\n @safe_property\n def parent(self):\n return self.inference_state.builtins_module\n\n def py__getitem__(self, index_value_set, contextualized_node):\n if self.array_type == 'dict':\n return self._dict_values()\n return iterate_values(ValueSet([self]))\n\n\nclass _BaseComprehension(ComprehensionMixin):\n def __init__(self, inference_state, defining_context, sync_comp_for_node, entry_node):\n assert sync_comp_for_node.type == 'sync_comp_for'\n super().__init__(inference_state)\n self._defining_context = defining_context\n self._sync_comp_for_node = sync_comp_for_node\n self._entry_node = 
entry_node\n\n\nclass ListComprehension(_BaseComprehension, Sequence):\n array_type = 'list'\n\n def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n\n all_types = list(self.py__iter__())\n with reraise_getitem_errors(IndexError, TypeError):\n lazy_value = all_types[index]\n return lazy_value.infer()\n\n\nclass SetComprehension(_BaseComprehension, Sequence):\n array_type = 'set'\n\n\nclass GeneratorComprehension(_BaseComprehension, GeneratorBase):\n pass\n\n\nclass _DictKeyMixin:\n # TODO merge with _DictMixin?\n def get_mapping_item_values(self):\n return self._dict_keys(), self._dict_values()\n\n def get_key_values(self):\n # TODO merge with _dict_keys?\n return self._dict_keys()\n\n\nclass DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin):\n array_type = 'dict'\n\n def __init__(self, inference_state, defining_context, sync_comp_for_node, key_node, value_node):\n assert sync_comp_for_node.type == 'sync_comp_for'\n super().__init__(inference_state)\n self._defining_context = defining_context\n self._sync_comp_for_node = sync_comp_for_node\n self._entry_node = key_node\n self._value_node = value_node\n\n def py__iter__(self, contextualized_node=None):\n for keys, values in self._iterate():\n yield LazyKnownValues(keys)\n\n def py__simple_getitem__(self, index):\n for keys, values in self._iterate():\n for k in keys:\n # Be careful in the future if refactoring, index could be a\n # slice object.\n if k.get_safe_value(default=object()) == index:\n return values\n raise SimpleGetItemNotFound()\n\n def _dict_keys(self):\n return ValueSet.from_sets(keys for keys, values in self._iterate())\n\n def _dict_values(self):\n return ValueSet.from_sets(values for keys, values in self._iterate())\n\n @publish_method('values')\n def _imitate_values(self, arguments):\n lazy_value = LazyKnownValues(self._dict_values())\n return ValueSet([FakeList(self.inference_state, [lazy_value])])\n\n @publish_method('items')\n def 
_imitate_items(self, arguments):\n lazy_values = [\n LazyKnownValue(\n FakeTuple(\n self.inference_state,\n [LazyKnownValues(key),\n LazyKnownValues(value)]\n )\n )\n for key, value in self._iterate()\n ]\n\n return ValueSet([FakeList(self.inference_state, lazy_values)])\n\n def exact_key_items(self):\n # NOTE: A smarter thing can probably done here to achieve better\n # completions, but at least like this jedi doesn't crash\n return []\n\n\nclass SequenceLiteralValue(Sequence):\n _TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist'\n mapping = {'(': 'tuple',\n '[': 'list',\n '{': 'set'}\n\n def __init__(self, inference_state, defining_context, atom):\n super().__init__(inference_state)\n self.atom = atom\n self._defining_context = defining_context\n\n if self.atom.type in self._TUPLE_LIKE:\n self.array_type = 'tuple'\n else:\n self.array_type = SequenceLiteralValue.mapping[atom.children[0]]\n """The builtin name of the array (list, set, tuple or dict)."""\n\n def _get_generics(self):\n if self.array_type == 'tuple':\n return tuple(x.infer().py__class__() for x in self.py__iter__())\n return super()._get_generics()\n\n def py__simple_getitem__(self, index):\n """Here the index is an int/str. 
Raises IndexError/KeyError."""\n if isinstance(index, slice):\n return ValueSet([self])\n else:\n with reraise_getitem_errors(TypeError, KeyError, IndexError):\n node = self.get_tree_entries()[index]\n if node == ':' or node.type == 'subscript':\n return NO_VALUES\n return self._defining_context.infer_node(node)\n\n def py__iter__(self, contextualized_node=None):\n """\n While values returns the possible values for any array field, this\n function returns the value for a certain index.\n """\n for node in self.get_tree_entries():\n if node == ':' or node.type == 'subscript':\n # TODO this should probably use at least part of the code\n # of infer_subscript_list.\n yield LazyKnownValue(Slice(self._defining_context, None, None, None))\n else:\n yield LazyTreeValue(self._defining_context, node)\n yield from check_array_additions(self._defining_context, self)\n\n def py__len__(self):\n # This function is not really used often. It's more of a try.\n return len(self.get_tree_entries())\n\n def get_tree_entries(self):\n c = self.atom.children\n\n if self.atom.type in self._TUPLE_LIKE:\n return c[::2]\n\n array_node = c[1]\n if array_node in (']', '}', ')'):\n return [] # Direct closing bracket, doesn't contain items.\n\n if array_node.type == 'testlist_comp':\n # filter out (for now) pep 448 single-star unpacking\n return [value for value in array_node.children[::2]\n if value.type != "star_expr"]\n elif array_node.type == 'dictorsetmaker':\n kv = []\n iterator = iter(array_node.children)\n for key in iterator:\n if key == "**":\n # dict with pep 448 double-star unpacking\n # for now ignoring the values imported by **\n next(iterator)\n next(iterator, None) # Possible comma.\n else:\n op = next(iterator, None)\n if op is None or op == ',':\n if key.type == "star_expr":\n # pep 448 single-star unpacking\n # for now ignoring values imported by *\n pass\n else:\n kv.append(key) # A set.\n else:\n assert op == ':' # A dict.\n kv.append((key, next(iterator)))\n next(iterator, 
None) # Possible comma.\n return kv\n else:\n if array_node.type == "star_expr":\n # pep 448 single-star unpacking\n # for now ignoring values imported by *\n return []\n else:\n return [array_node]\n\n def __repr__(self):\n return "<%s of %s>" % (self.__class__.__name__, self.atom)\n\n\nclass DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):\n array_type = 'dict'\n\n def __init__(self, inference_state, defining_context, atom):\n # Intentionally don't call the super class. This is definitely a sign\n # that the architecture is bad and we should refactor.\n Sequence.__init__(self, inference_state)\n self._defining_context = defining_context\n self.atom = atom\n\n def py__simple_getitem__(self, index):\n """Here the index is an int/str. Raises IndexError/KeyError."""\n compiled_value_index = compiled.create_simple_object(self.inference_state, index)\n for key, value in self.get_tree_entries():\n for k in self._defining_context.infer_node(key):\n for key_v in k.execute_operation(compiled_value_index, '=='):\n if key_v.get_safe_value():\n return self._defining_context.infer_node(value)\n raise SimpleGetItemNotFound('No key found in dictionary %s.' 
% self)\n\n def py__iter__(self, contextualized_node=None):\n """\n While values returns the possible values for any array field, this\n function returns the value for a certain index.\n """\n # Get keys.\n types = NO_VALUES\n for k, _ in self.get_tree_entries():\n types |= self._defining_context.infer_node(k)\n # We don't know which dict index comes first, therefore always\n # yield all the types.\n for _ in types:\n yield LazyKnownValues(types)\n\n @publish_method('values')\n def _imitate_values(self, arguments):\n lazy_value = LazyKnownValues(self._dict_values())\n return ValueSet([FakeList(self.inference_state, [lazy_value])])\n\n @publish_method('items')\n def _imitate_items(self, arguments):\n lazy_values = [\n LazyKnownValue(FakeTuple(\n self.inference_state,\n (LazyTreeValue(self._defining_context, key_node),\n LazyTreeValue(self._defining_context, value_node))\n )) for key_node, value_node in self.get_tree_entries()\n ]\n\n return ValueSet([FakeList(self.inference_state, lazy_values)])\n\n def exact_key_items(self):\n """\n Returns a generator of tuples like dict.items(), where the key is\n resolved (as a string) and the values are still lazy values.\n """\n for key_node, value in self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)\n\n def _dict_values(self):\n return ValueSet.from_sets(\n self._defining_context.infer_node(v)\n for k, v in self.get_tree_entries()\n )\n\n def _dict_keys(self):\n return ValueSet.from_sets(\n self._defining_context.infer_node(k)\n for k, v in self.get_tree_entries()\n )\n\n\nclass _FakeSequence(Sequence):\n def __init__(self, inference_state, lazy_value_list):\n """\n type should be one of "tuple", "list"\n """\n super().__init__(inference_state)\n self._lazy_value_list = lazy_value_list\n\n def py__simple_getitem__(self, index):\n if isinstance(index, slice):\n return ValueSet([self])\n\n with 
reraise_getitem_errors(IndexError, TypeError):\n lazy_value = self._lazy_value_list[index]\n return lazy_value.infer()\n\n def py__iter__(self, contextualized_node=None):\n return self._lazy_value_list\n\n def py__bool__(self):\n return bool(len(self._lazy_value_list))\n\n def __repr__(self):\n return "<%s of %s>" % (type(self).__name__, self._lazy_value_list)\n\n\nclass FakeTuple(_FakeSequence):\n array_type = 'tuple'\n\n\nclass FakeList(_FakeSequence):\n array_type = 'tuple'\n\n\nclass FakeDict(_DictMixin, Sequence, _DictKeyMixin):\n array_type = 'dict'\n\n def __init__(self, inference_state, dct):\n super().__init__(inference_state)\n self._dct = dct\n\n def py__iter__(self, contextualized_node=None):\n for key in self._dct:\n yield LazyKnownValue(compiled.create_simple_object(self.inference_state, key))\n\n def py__simple_getitem__(self, index):\n with reraise_getitem_errors(KeyError, TypeError):\n lazy_value = self._dct[index]\n return lazy_value.infer()\n\n @publish_method('values')\n def _values(self, arguments):\n return ValueSet([FakeTuple(\n self.inference_state,\n [LazyKnownValues(self._dict_values())]\n )])\n\n def _dict_values(self):\n return ValueSet.from_sets(lazy_value.infer() for lazy_value in self._dct.values())\n\n def _dict_keys(self):\n return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())\n\n def exact_key_items(self):\n return self._dct.items()\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self._dct)\n\n\nclass MergedArray(Sequence):\n def __init__(self, inference_state, arrays):\n super().__init__(inference_state)\n self.array_type = arrays[-1].array_type\n self._arrays = arrays\n\n def py__iter__(self, contextualized_node=None):\n for array in self._arrays:\n yield from array.py__iter__()\n\n def py__simple_getitem__(self, index):\n return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())\n\n\ndef unpack_tuple_to_dict(context, types, exprlist):\n """\n Unpacking 
tuple assignments in for statements and expr_stmts.\n """\n if exprlist.type == 'name':\n return {exprlist.value: types}\n elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):\n return unpack_tuple_to_dict(context, types, exprlist.children[1])\n elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',\n 'testlist_star_expr'):\n dct = {}\n parts = iter(exprlist.children[::2])\n n = 0\n for lazy_value in types.iterate(ContextualizedNode(context, exprlist)):\n n += 1\n try:\n part = next(parts)\n except StopIteration:\n analysis.add(context, 'value-error-too-many-values', part,\n message="ValueError: too many values to unpack (expected %s)" % n)\n else:\n dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part))\n has_parts = next(parts, None)\n if types and has_parts is not None:\n analysis.add(context, 'value-error-too-few-values', has_parts,\n message="ValueError: need more than %s values to unpack" % n)\n return dct\n elif exprlist.type == 'power' or exprlist.type == 'atom_expr':\n # Something like ``arr[x], var = ...``.\n # This is something that is not yet supported, would also be difficult\n # to write into a dict.\n return {}\n elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings\n # Currently we're not supporting them.\n return {}\n raise NotImplementedError\n\n\nclass Slice(LazyValueWrapper):\n def __init__(self, python_context, start, stop, step):\n self.inference_state = python_context.inference_state\n self._context = python_context\n # All of them are either a Precedence or None.\n self._start = start\n self._stop = stop\n self._step = step\n\n def _get_wrapped_value(self):\n value = compiled.builtin_from_name(self._context.inference_state, 'slice')\n slice_value, = value.execute_with_values()\n return slice_value\n\n def get_safe_value(self, default=sentinel):\n """\n Imitate CompiledValue.obj behavior and return a ``builtin.slice()``\n object.\n """\n def get(element):\n if element is None:\n return 
None\n\n result = self._context.infer_node(element)\n if len(result) != 1:\n # For simplicity, we want slices to be clear defined with just\n # one type. Otherwise we will return an empty slice object.\n raise IndexError\n\n value, = result\n return get_int_or_none(value)\n\n try:\n return slice(get(self._start), get(self._stop), get(self._step))\n except IndexError:\n return slice(None, None, None)\n
|
.venv\Lib\site-packages\jedi\inference\value\iterable.py
|
iterable.py
|
Python
| 23,305 | 0.95 | 0.267388 | 0.065385 |
awesome-app
| 226 |
2024-11-25T10:44:59.818579
|
Apache-2.0
| false |
14f3f54c3fd7076f67d376aa65d72ce3
|
import os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.names import AbstractNameDefinition, ModuleName\nfrom jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter\nfrom jedi.inference import compiled\nfrom jedi.inference.base_value import TreeValue\nfrom jedi.inference.names import SubModuleName\nfrom jedi.inference.helpers import values_from_qualified_names\nfrom jedi.inference.compiled import create_simple_object\nfrom jedi.inference.base_value import ValueSet\nfrom jedi.inference.context import ModuleContext\n\n\nclass _ModuleAttributeName(AbstractNameDefinition):\n """\n For module attributes like __file__, __str__ and so on.\n """\n api_type = 'instance'\n\n def __init__(self, parent_module, string_name, string_value=None):\n self.parent_context = parent_module\n self.string_name = string_name\n self._string_value = string_value\n\n def infer(self):\n if self._string_value is not None:\n s = self._string_value\n return ValueSet([\n create_simple_object(self.parent_context.inference_state, s)\n ])\n return compiled.get_string_value_set(self.parent_context.inference_state)\n\n\nclass SubModuleDictMixin:\n @inference_state_method_cache()\n def sub_modules_dict(self):\n """\n Lists modules in the directory of this module (if this module is a\n package).\n """\n names = {}\n if self.is_package():\n mods = self.inference_state.compiled_subprocess.iter_module_names(\n self.py__path__()\n )\n for name in mods:\n # It's obviously a relative import to the current module.\n names[name] = SubModuleName(self.as_context(), name)\n\n # In the case of an import like `from x.` we don't need to\n # add all the variables, this is only about submodules.\n return names\n\n\nclass ModuleMixin(SubModuleDictMixin):\n _module_name_class = ModuleName\n\n def get_filters(self, origin_scope=None):\n yield MergedFilter(\n ParserTreeFilter(\n 
parent_context=self.as_context(),\n origin_scope=origin_scope\n ),\n GlobalNameFilter(self.as_context()),\n )\n yield DictFilter(self.sub_modules_dict())\n yield DictFilter(self._module_attributes_dict())\n yield from self.iter_star_filters()\n\n def py__class__(self):\n c, = values_from_qualified_names(self.inference_state, 'types', 'ModuleType')\n return c\n\n def is_module(self):\n return True\n\n def is_stub(self):\n return False\n\n @property # type: ignore[misc]\n @inference_state_method_cache()\n def name(self):\n return self._module_name_class(self, self.string_names[-1])\n\n @inference_state_method_cache()\n def _module_attributes_dict(self):\n names = ['__package__', '__doc__', '__name__']\n # All the additional module attributes are strings.\n dct = dict((n, _ModuleAttributeName(self, n)) for n in names)\n path = self.py__file__()\n if path is not None:\n dct['__file__'] = _ModuleAttributeName(self, '__file__', str(path))\n return dct\n\n def iter_star_filters(self):\n for star_module in self.star_imports():\n f = next(star_module.get_filters(), None)\n assert f is not None\n yield f\n\n # I'm not sure if the star import cache is really that effective anymore\n # with all the other really fast import caches. Recheck. Also we would need\n # to push the star imports into InferenceState.module_cache, if we reenable this.\n @inference_state_method_cache([])\n def star_imports(self):\n from jedi.inference.imports import Importer\n\n modules = []\n module_context = self.as_context()\n for i in self.tree_node.iter_imports():\n if i.is_star_import():\n new = Importer(\n self.inference_state,\n import_path=i.get_paths()[-1],\n module_context=module_context,\n level=i.level\n ).follow()\n\n for module in new:\n if isinstance(module, ModuleValue):\n modules += module.star_imports()\n modules += new\n return modules\n\n def get_qualified_names(self):\n """\n A module doesn't have a qualified name, but it's important to note that\n it's reachable and not `None`. 
With this information we can add\n qualified names on top for all value children.\n """\n return ()\n\n\nclass ModuleValue(ModuleMixin, TreeValue):\n api_type = 'module'\n\n def __init__(self, inference_state, module_node, code_lines, file_io=None,\n string_names=None, is_package=False):\n super().__init__(\n inference_state,\n parent_context=None,\n tree_node=module_node\n )\n self.file_io = file_io\n if file_io is None:\n self._path: Optional[Path] = None\n else:\n self._path = file_io.path\n self.string_names = string_names # Optional[Tuple[str, ...]]\n self.code_lines = code_lines\n self._is_package = is_package\n\n def is_stub(self):\n if self._path is not None and self._path.suffix == '.pyi':\n # Currently this is the way how we identify stubs when e.g. goto is\n # used in them. This could be changed if stubs would be identified\n # sooner and used as StubModuleValue.\n return True\n return super().is_stub()\n\n def py__name__(self):\n if self.string_names is None:\n return None\n return '.'.join(self.string_names)\n\n def py__file__(self) -> Optional[Path]:\n """\n In contrast to Python's __file__ can be None.\n """\n if self._path is None:\n return None\n\n return self._path.absolute()\n\n def is_package(self):\n return self._is_package\n\n def py__package__(self):\n if self.string_names is None:\n return []\n\n if self._is_package:\n return self.string_names\n return self.string_names[:-1]\n\n def py__path__(self):\n """\n In case of a package, this returns Python's __path__ attribute, which\n is a list of paths (strings).\n Returns None if the module is not a package.\n """\n if not self._is_package:\n return None\n\n # A namespace package is typically auto generated and ~10 lines long.\n first_few_lines = ''.join(self.code_lines[:50])\n # these are strings that need to be used for namespace packages,\n # the first one is ``pkgutil``, the second ``pkg_resources``.\n options = ('declare_namespace(__name__)', 'extend_path(__path__')\n if options[0] in 
first_few_lines or options[1] in first_few_lines:\n # It is a namespace, now try to find the rest of the\n # modules on sys_path or whatever the search_path is.\n paths = set()\n for s in self.inference_state.get_sys_path():\n other = os.path.join(s, self.name.string_name)\n if os.path.isdir(other):\n paths.add(other)\n if paths:\n return list(paths)\n # Nested namespace packages will not be supported. Nobody ever\n # asked for it and in Python 3 they are there without using all the\n # crap above.\n\n # Default to the of this file.\n file = self.py__file__()\n assert file is not None # Shouldn't be a package in the first place.\n return [os.path.dirname(file)]\n\n def _as_context(self):\n return ModuleContext(self)\n\n def __repr__(self):\n return "<%s: %s@%s-%s is_stub=%s>" % (\n self.__class__.__name__, self.py__name__(),\n self.tree_node.start_pos[0], self.tree_node.end_pos[0],\n self.is_stub()\n )\n
|
.venv\Lib\site-packages\jedi\inference\value\module.py
|
module.py
|
Python
| 8,118 | 0.95 | 0.23913 | 0.097938 |
vue-tools
| 810 |
2024-10-23T03:48:34.780216
|
GPL-3.0
| false |
fe304a47032485e8094fd39bee85ee7a
|
from pathlib import Path\nfrom typing import Optional\n\nfrom jedi.inference.cache import inference_state_method_cache\nfrom jedi.inference.filters import DictFilter\nfrom jedi.inference.names import ValueNameMixin, AbstractNameDefinition\nfrom jedi.inference.base_value import Value\nfrom jedi.inference.value.module import SubModuleDictMixin\nfrom jedi.inference.context import NamespaceContext\n\n\nclass ImplicitNSName(ValueNameMixin, AbstractNameDefinition):\n """\n Accessing names for implicit namespace packages should infer to nothing.\n This object will prevent Jedi from raising exceptions\n """\n def __init__(self, implicit_ns_value, string_name):\n self._value = implicit_ns_value\n self.string_name = string_name\n\n\nclass ImplicitNamespaceValue(Value, SubModuleDictMixin):\n """\n Provides support for implicit namespace packages\n """\n api_type = 'namespace'\n parent_context = None\n\n def __init__(self, inference_state, string_names, paths):\n super().__init__(inference_state, parent_context=None)\n self.inference_state = inference_state\n self.string_names = string_names\n self._paths = paths\n\n def get_filters(self, origin_scope=None):\n yield DictFilter(self.sub_modules_dict())\n\n def get_qualified_names(self):\n return ()\n\n @property # type: ignore[misc]\n @inference_state_method_cache()\n def name(self):\n string_name = self.py__package__()[-1]\n return ImplicitNSName(self, string_name)\n\n def py__file__(self) -> Optional[Path]:\n return None\n\n def py__package__(self):\n """Return the fullname\n """\n return self.string_names\n\n def py__path__(self):\n return self._paths\n\n def py__name__(self):\n return '.'.join(self.string_names)\n\n def is_namespace(self):\n return True\n\n def is_stub(self):\n return False\n\n def is_package(self):\n return True\n\n def as_context(self):\n return NamespaceContext(self)\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.py__name__())\n
|
.venv\Lib\site-packages\jedi\inference\value\namespace.py
|
namespace.py
|
Python
| 2,101 | 0.95 | 0.243243 | 0 |
vue-tools
| 10 |
2024-01-08T05:08:20.055990
|
Apache-2.0
| false |
456e15bb2b9732e2e349a05107a55fe5
|
# Re-export symbols for wider use. We configure mypy and flake8 to be aware that\n# this file does this.\n\nfrom jedi.inference.value.module import ModuleValue\nfrom jedi.inference.value.klass import ClassValue\nfrom jedi.inference.value.function import FunctionValue, \\n MethodValue\nfrom jedi.inference.value.instance import AnonymousInstance, BoundMethod, \\n CompiledInstance, AbstractInstanceValue, TreeInstance\n
|
.venv\Lib\site-packages\jedi\inference\value\__init__.py
|
__init__.py
|
Python
| 416 | 0.95 | 0.222222 | 0.25 |
awesome-app
| 445 |
2023-08-15T21:21:03.082345
|
GPL-3.0
| false |
42d9c9e9ee3f5c0cf86480ab87a302cc
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\decorator.cpython-313.pyc
|
decorator.cpython-313.pyc
|
Other
| 2,166 | 0.8 | 0 | 0 |
vue-tools
| 866 |
2023-10-11T11:58:38.065448
|
MIT
| false |
fc092fcb4ea924aa7ca38740646133bf
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\dynamic_arrays.cpython-313.pyc
|
dynamic_arrays.cpython-313.pyc
|
Other
| 10,863 | 0.95 | 0.073394 | 0.01 |
node-utils
| 961 |
2024-01-18T21:33:29.375530
|
Apache-2.0
| false |
7d4055cbf1072dd557812da2d80f4df6
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\function.cpython-313.pyc
|
function.cpython-313.pyc
|
Other
| 26,562 | 0.95 | 0.011628 | 0.011905 |
awesome-app
| 929 |
2025-02-01T22:17:15.027820
|
Apache-2.0
| false |
79eee6236537d1aa965c1422f361b404
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\instance.cpython-313.pyc
|
instance.cpython-313.pyc
|
Other
| 36,279 | 0.95 | 0.030303 | 0 |
vue-tools
| 324 |
2023-10-25T20:34:26.233787
|
GPL-3.0
| false |
2e78f5320d2adedfc3ca89cd8302d790
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\iterable.cpython-313.pyc
|
iterable.cpython-313.pyc
|
Other
| 37,510 | 0.95 | 0.017699 | 0.022727 |
awesome-app
| 369 |
2025-01-17T21:58:32.295736
|
GPL-3.0
| false |
d775318c17325db6b7834737ba51f3e1
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\klass.cpython-313.pyc
|
klass.cpython-313.pyc
|
Other
| 19,241 | 0.95 | 0.082803 | 0.013793 |
vue-tools
| 930 |
2024-09-13T02:39:23.988838
|
BSD-3-Clause
| false |
b33b26208c9df6718b07998b1f6ef3cd
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\module.cpython-313.pyc
|
module.cpython-313.pyc
|
Other
| 10,992 | 0.95 | 0.035714 | 0 |
python-kit
| 351 |
2025-06-04T23:33:28.223164
|
Apache-2.0
| false |
e617e0bff9e04c09dffdb15603415207
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\namespace.cpython-313.pyc
|
namespace.cpython-313.pyc
|
Other
| 4,318 | 0.8 | 0.0625 | 0 |
vue-tools
| 78 |
2023-11-09T05:17:05.654020
|
Apache-2.0
| false |
0912e187175b1ad426f8509feee04fb6
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\value\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 600 | 0.85 | 0 | 0 |
python-kit
| 693 |
2025-04-28T02:50:58.040643
|
BSD-3-Clause
| false |
4a227f28c09301d5b95dd1e3f9f320fe
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\analysis.cpython-313.pyc
|
analysis.cpython-313.pyc
|
Other
| 11,359 | 0.95 | 0.095238 | 0.012346 |
python-kit
| 502 |
2024-03-08T23:22:20.934880
|
MIT
| false |
7dba2146ded4fd13d8fa07ba97efc64d
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\arguments.cpython-313.pyc
|
arguments.cpython-313.pyc
|
Other
| 16,319 | 0.95 | 0.029412 | 0.030928 |
react-lib
| 130 |
2025-06-21T19:39:36.983666
|
Apache-2.0
| false |
8e8874374ccaa908f659a3768863cc6f
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\base_value.cpython-313.pyc
|
base_value.cpython-313.pyc
|
Other
| 31,803 | 0.95 | 0.046083 | 0 |
python-kit
| 466 |
2024-06-16T15:32:13.116024
|
MIT
| false |
1dcae444b9522128735327e7a54a97ce
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\cache.cpython-313.pyc
|
cache.cpython-313.pyc
|
Other
| 5,619 | 0.95 | 0.089286 | 0.018868 |
react-lib
| 360 |
2024-08-06T20:54:37.803417
|
MIT
| false |
2f4ad8a4ac627a8aeee5a6075c045103
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\context.cpython-313.pyc
|
context.cpython-313.pyc
|
Other
| 24,691 | 0.95 | 0.047872 | 0 |
vue-tools
| 116 |
2024-05-26T01:38:18.992432
|
BSD-3-Clause
| false |
705c4f200351571fabf0e48f6952cc66
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\docstrings.cpython-313.pyc
|
docstrings.cpython-313.pyc
|
Other
| 13,250 | 0.95 | 0.065789 | 0.079137 |
vue-tools
| 444 |
2025-02-07T13:49:33.455598
|
Apache-2.0
| false |
5bf1f502f0acdb89ba47911cb230ca51
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\docstring_utils.cpython-313.pyc
|
docstring_utils.cpython-313.pyc
|
Other
| 1,923 | 0.8 | 0 | 0 |
node-utils
| 199 |
2023-09-29T04:32:15.542684
|
GPL-3.0
| false |
906b970d4db7fa0e7c7f51a04f9b5cc8
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\dynamic_params.cpython-313.pyc
|
dynamic_params.cpython-313.pyc
|
Other
| 9,009 | 0.95 | 0.085106 | 0 |
vue-tools
| 98 |
2024-04-04T11:04:46.416185
|
Apache-2.0
| false |
914aa9d51b90c051187c19b1fd9a774b
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\filters.cpython-313.pyc
|
filters.cpython-313.pyc
|
Other
| 22,042 | 0.95 | 0.057971 | 0.007463 |
react-lib
| 19 |
2025-01-13T02:43:29.448306
|
BSD-3-Clause
| false |
70178636b5e497f27b2a60f4161be988
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\finder.cpython-313.pyc
|
finder.cpython-313.pyc
|
Other
| 6,683 | 0.8 | 0.064516 | 0 |
vue-tools
| 94 |
2024-09-28T05:13:25.491862
|
GPL-3.0
| false |
bb5ccd0fac3df50a89d2d7ec38da0dac
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\flow_analysis.cpython-313.pyc
|
flow_analysis.cpython-313.pyc
|
Other
| 5,445 | 0.8 | 0 | 0 |
node-utils
| 42 |
2024-08-26T21:54:09.994509
|
GPL-3.0
| false |
c31789f67960606dec4e821eddf5b8a6
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\helpers.cpython-313.pyc
|
helpers.cpython-313.pyc
|
Other
| 7,890 | 0.95 | 0.061728 | 0 |
node-utils
| 217 |
2024-02-13T09:37:20.640549
|
Apache-2.0
| false |
f2169df315a15091341ecb0aaee0bda8
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\imports.cpython-313.pyc
|
imports.cpython-313.pyc
|
Other
| 22,189 | 0.95 | 0.027523 | 0.009662 |
python-kit
| 199 |
2025-04-10T00:59:46.846293
|
Apache-2.0
| false |
74da2cff11bf453129453c7335e8f96e
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\lazy_value.cpython-313.pyc
|
lazy_value.cpython-313.pyc
|
Other
| 4,362 | 0.8 | 0 | 0 |
python-kit
| 769 |
2024-06-10T21:39:09.993172
|
Apache-2.0
| false |
bd143497ed57f2598986b7aa7c484e35
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\names.cpython-313.pyc
|
names.cpython-313.pyc
|
Other
| 35,050 | 0.95 | 0.043902 | 0 |
vue-tools
| 298 |
2025-04-21T21:24:47.874078
|
BSD-3-Clause
| false |
a345d7404ab0cf4bd266d0302220a35f
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\param.cpython-313.pyc
|
param.cpython-313.pyc
|
Other
| 11,004 | 0.95 | 0.123894 | 0 |
vue-tools
| 94 |
2024-08-09T04:39:54.095857
|
GPL-3.0
| false |
756ae031580e31e4d937a8ada0ef6239
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\parser_cache.cpython-313.pyc
|
parser_cache.cpython-313.pyc
|
Other
| 530 | 0.85 | 0 | 0 |
node-utils
| 971 |
2025-01-08T18:04:17.932849
|
Apache-2.0
| false |
893800124d9ab96267c3835447c79449
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\recursion.cpython-313.pyc
|
recursion.cpython-313.pyc
|
Other
| 6,206 | 0.95 | 0.096774 | 0 |
react-lib
| 374 |
2024-09-29T07:45:25.447649
|
Apache-2.0
| false |
ffe219ed6b7d4e2b033298cb042bd51b
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\references.cpython-313.pyc
|
references.cpython-313.pyc
|
Other
| 14,129 | 0.95 | 0 | 0.02 |
node-utils
| 377 |
2024-07-13T12:36:25.654900
|
Apache-2.0
| false |
089626a70218f5490a8937161f81e11e
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\signature.cpython-313.pyc
|
signature.cpython-313.pyc
|
Other
| 8,642 | 0.95 | 0 | 0 |
vue-tools
| 71 |
2024-12-13T07:42:10.779653
|
Apache-2.0
| false |
244477b6e9a9d4b65bf616bf8b6076c4
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\star_args.cpython-313.pyc
|
star_args.cpython-313.pyc
|
Other
| 9,476 | 0.95 | 0.05 | 0.00885 |
react-lib
| 604 |
2024-11-29T19:24:43.694280
|
GPL-3.0
| false |
19e9128df055e75636248f0ad3f2363e
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\syntax_tree.cpython-313.pyc
|
syntax_tree.cpython-313.pyc
|
Other
| 40,797 | 0.95 | 0.030641 | 0.005935 |
node-utils
| 455 |
2024-06-16T22:58:33.223306
|
GPL-3.0
| false |
7e9d24a51c296820cda07463e7b8eacc
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\sys_path.cpython-313.pyc
|
sys_path.cpython-313.pyc
|
Other
| 12,101 | 0.95 | 0.043478 | 0 |
awesome-app
| 285 |
2024-02-26T22:39:51.189616
|
Apache-2.0
| false |
a943e66567efaf7dfdddfd87be19b91e
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\utils.cpython-313.pyc
|
utils.cpython-313.pyc
|
Other
| 5,052 | 0.95 | 0.148936 | 0 |
awesome-app
| 20 |
2024-04-03T12:00:09.172385
|
BSD-3-Clause
| false |
310a3b4c94db2fdd17b732e2bd4158c7
|
\n\n
|
.venv\Lib\site-packages\jedi\inference\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 10,490 | 0.95 | 0.087719 | 0.030612 |
vue-tools
| 437 |
2023-09-21T18:06:38.821930
|
Apache-2.0
| false |
f29351638981c460131d97cd633ab443
|
def import_module(callback):\n """\n Handle "magic" Flask extension imports:\n ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.\n """\n def wrapper(inference_state, import_names, module_context, *args, **kwargs):\n if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):\n # New style.\n ipath = ('flask_' + import_names[2]),\n value_set = callback(inference_state, ipath, None, *args, **kwargs)\n if value_set:\n return value_set\n value_set = callback(inference_state, ('flaskext',), None, *args, **kwargs)\n return callback(\n inference_state,\n ('flaskext', import_names[2]),\n next(iter(value_set)),\n *args, **kwargs\n )\n return callback(inference_state, import_names, module_context, *args, **kwargs)\n return wrapper\n
|
.venv\Lib\site-packages\jedi\plugins\flask.py
|
flask.py
|
Python
| 916 | 0.95 | 0.190476 | 0.095238 |
node-utils
| 545 |
2025-01-14T13:36:02.929146
|
GPL-3.0
| false |
b9ae60c63e9bbdd2128904c12d75c0cc
|
"""\nThis is not a plugin, this is just the place were plugins are registered.\n"""\n\nfrom jedi.plugins import stdlib\nfrom jedi.plugins import flask\nfrom jedi.plugins import pytest\nfrom jedi.plugins import django\nfrom jedi.plugins import plugin_manager\n\n\nplugin_manager.register(stdlib, flask, pytest, django)\n
|
.venv\Lib\site-packages\jedi\plugins\registry.py
|
registry.py
|
Python
| 307 | 0.85 | 0 | 0 |
python-kit
| 727 |
2025-01-02T03:15:30.014978
|
BSD-3-Clause
| false |
1954f39779fc5805f273e7b55e7a6d45
|
from functools import wraps\n\n\nclass _PluginManager:\n def __init__(self):\n self._registered_plugins = []\n self._cached_base_callbacks = {}\n self._built_functions = {}\n\n def register(self, *plugins):\n """\n Makes it possible to register your plugin.\n """\n self._registered_plugins.extend(plugins)\n self._build_functions()\n\n def decorate(self, name=None):\n def decorator(callback):\n @wraps(callback)\n def wrapper(*args, **kwargs):\n return built_functions[public_name](*args, **kwargs)\n\n public_name = name or callback.__name__\n\n assert public_name not in self._built_functions\n built_functions = self._built_functions\n built_functions[public_name] = callback\n self._cached_base_callbacks[public_name] = callback\n\n return wrapper\n\n return decorator\n\n def _build_functions(self):\n for name, callback in self._cached_base_callbacks.items():\n for plugin in reversed(self._registered_plugins):\n # Need to reverse so the first plugin is run first.\n try:\n func = getattr(plugin, name)\n except AttributeError:\n pass\n else:\n callback = func(callback)\n self._built_functions[name] = callback\n\n\nplugin_manager = _PluginManager()\n
|
.venv\Lib\site-packages\jedi\plugins\__init__.py
|
__init__.py
|
Python
| 1,445 | 0.95 | 0.212766 | 0.027778 |
vue-tools
| 703 |
2025-02-17T17:06:15.948882
|
BSD-3-Clause
| false |
646595e9b20b6465c9c364db588d5d1a
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\django.cpython-313.pyc
|
django.cpython-313.pyc
|
Other
| 17,117 | 0.95 | 0.009091 | 0 |
vue-tools
| 442 |
2024-06-15T21:57:30.811272
|
BSD-3-Clause
| false |
da607c2a8659cbcc6b14eb3f41c11e13
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\flask.cpython-313.pyc
|
flask.cpython-313.pyc
|
Other
| 1,123 | 0.85 | 0 | 0 |
node-utils
| 796 |
2024-02-16T07:22:47.629512
|
MIT
| false |
9eb428ec25e6e244a1a54fa7fa22738a
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\pytest.cpython-313.pyc
|
pytest.cpython-313.pyc
|
Other
| 13,545 | 0.95 | 0.010101 | 0 |
vue-tools
| 566 |
2024-07-20T16:18:27.416679
|
BSD-3-Clause
| true |
d44d03f5deba1f0972a6f01b0090e06d
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\registry.cpython-313.pyc
|
registry.cpython-313.pyc
|
Other
| 531 | 0.7 | 0 | 0 |
vue-tools
| 915 |
2024-03-26T07:49:47.839483
|
Apache-2.0
| false |
a3fa7b4a13ba8ae67d5413f2188bd6c3
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\stdlib.cpython-313.pyc
|
stdlib.cpython-313.pyc
|
Other
| 44,850 | 0.95 | 0.071429 | 0.016077 |
vue-tools
| 165 |
2023-11-18T20:00:05.594417
|
MIT
| false |
9dc28ad3b90aab2b0a220dc6f2593755
|
\n\n
|
.venv\Lib\site-packages\jedi\plugins\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 2,544 | 0.95 | 0 | 0 |
react-lib
| 908 |
2024-09-15T03:54:12.434288
|
MIT
| false |
3b94c94d0242caf17f7138a4063706ef
|
Copyright (c) Maxim Kurnikov.\nAll rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\LICENSE.txt
|
LICENSE.txt
|
Other
| 1,075 | 0.7 | 0 | 0 |
react-lib
| 578 |
2023-09-30T04:28:16.378323
|
MIT
| false |
0d8b10b099d9c3e63d4293166344001f
|
import sys\nfrom typing import Any, Callable, List, Mapping, Optional, overload, Protocol, Sequence, Type, TypeVar, Union\n\nfrom django.db.models.base import Model\nfrom django.http.response import (\n HttpResponse as HttpResponse,\n HttpResponseRedirect as HttpResponseRedirect,\n HttpResponsePermanentRedirect as HttpResponsePermanentRedirect,\n)\n\nfrom django.db.models import Manager, QuerySet\nfrom django.http import HttpRequest\n\nif sys.version_info < (3, 8):\n from typing_extensions import Literal\nelse:\n from typing import Literal\n\ndef render_to_response(\n template_name: Union[str, Sequence[str]],\n context: Optional[Mapping[str, Any]] = ...,\n content_type: Optional[str] = ...,\n status: Optional[int] = ...,\n using: Optional[str] = ...,\n) -> HttpResponse: ...\ndef render(\n request: HttpRequest,\n template_name: Union[str, Sequence[str]],\n context: Optional[Mapping[str, Any]] = ...,\n content_type: Optional[str] = ...,\n status: Optional[int] = ...,\n using: Optional[str] = ...,\n) -> HttpResponse: ...\n\nclass SupportsGetAbsoluteUrl(Protocol): ...\n\n@overload\ndef redirect(\n to: Union[Callable, str, SupportsGetAbsoluteUrl], *args: Any, permanent: Literal[True], **kwargs: Any\n) -> HttpResponsePermanentRedirect: ...\n@overload\ndef redirect(\n to: Union[Callable, str, SupportsGetAbsoluteUrl], *args: Any, permanent: Literal[False], **kwargs: Any\n) -> HttpResponseRedirect: ...\n@overload\ndef redirect(\n to: Union[Callable, str, SupportsGetAbsoluteUrl], *args: Any, permanent: bool = ..., **kwargs: Any\n) -> Union[HttpResponseRedirect, HttpResponsePermanentRedirect]: ...\n\n_T = TypeVar("_T", bound=Model)\n\ndef get_object_or_404(klass: Union[Type[_T], Manager[_T], QuerySet[_T]], *args: Any, **kwargs: Any) -> _T: ...\ndef get_list_or_404(klass: Union[Type[_T], Manager[_T], QuerySet[_T]], *args: Any, **kwargs: Any) -> List[_T]: ...\ndef resolve_url(to: Union[Callable, Model, str], *args: Any, **kwargs: Any) -> str: ...\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\shortcuts.pyi
|
shortcuts.pyi
|
Other
| 1,972 | 0.85 | 0.185185 | 0 |
awesome-app
| 591 |
2023-09-08T21:15:57.933452
|
GPL-3.0
| false |
abe26686fc4e7106db77942bf7981948
|
from typing import Any, NamedTuple\nfrom .utils.version import get_version as get_version\n\nVERSION: Any\n__version__: str\n\ndef setup(set_prefix: bool = ...) -> None: ...\n\n# Used by mypy_django_plugin when returning a QuerySet row that is a NamedTuple where the field names are unknown\nclass _NamedTupleAnyAttr(NamedTuple):\n def __getattr__(self, item: str) -> Any: ...\n def __setattr__(self, item: str, value: Any) -> None: ...\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\__init__.pyi
|
__init__.pyi
|
Other
| 432 | 0.95 | 0.333333 | 0.111111 |
awesome-app
| 341 |
2024-10-01T04:14:23.168050
|
Apache-2.0
| false |
b744c66ec089d09a12ebf936f79f6d1e
|
from typing import Any, Iterator, Type, Optional, Dict\n\nfrom django.apps.registry import Apps\nfrom django.db.models.base import Model\n\nMODELS_MODULE_NAME: str\n\nclass AppConfig:\n name: str = ...\n module: Optional[Any] = ...\n apps: Optional[Apps] = ...\n label: str = ...\n verbose_name: str = ...\n path: str = ...\n models_module: Optional[str] = ...\n models: Dict[str, Type[Model]] = ...\n def __init__(self, app_name: str, app_module: Optional[Any]) -> None: ...\n @classmethod\n def create(cls, entry: str) -> AppConfig: ...\n def get_model(self, model_name: str, require_ready: bool = ...) -> Type[Model]: ...\n def get_models(self, include_auto_created: bool = ..., include_swapped: bool = ...) -> Iterator[Type[Model]]: ...\n def import_models(self) -> None: ...\n def ready(self) -> None: ...\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\apps\config.pyi
|
config.pyi
|
Other
| 834 | 0.85 | 0.304348 | 0 |
react-lib
| 481 |
2023-10-10T06:28:59.625385
|
BSD-3-Clause
| false |
37044dcce30748cbd17946efb513d83f
|
import threading\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union\n\nfrom django.db.models.base import Model\n\nfrom .config import AppConfig\n\nclass Apps:\n all_models: Dict[str, Dict[str, Type[Model]]] = ...\n app_configs: Dict[str, AppConfig] = ...\n stored_app_configs: List[Any] = ...\n apps_ready: bool = ...\n ready_event: threading.Event = ...\n loading: bool = ...\n _pending_operations: Dict[Tuple[str, str], List]\n models_ready: bool = ...\n ready: bool = ...\n def __init__(self, installed_apps: Optional[Iterable[Union[AppConfig, str]]] = ...) -> None: ...\n def populate(self, installed_apps: Iterable[Union[AppConfig, str]] = ...) -> None: ...\n def check_apps_ready(self) -> None: ...\n def check_models_ready(self) -> None: ...\n def get_app_configs(self) -> Iterable[AppConfig]: ...\n def get_app_config(self, app_label: str) -> AppConfig: ...\n # it's not possible to support it in plugin properly now\n def get_models(self, include_auto_created: bool = ..., include_swapped: bool = ...) -> List[Type[Model]]: ...\n def get_model(self, app_label: str, model_name: Optional[str] = ..., require_ready: bool = ...) -> Type[Any]: ...\n def register_model(self, app_label: str, model: Type[Model]) -> None: ...\n def is_installed(self, app_name: str) -> bool: ...\n def get_containing_app_config(self, object_name: str) -> Optional[AppConfig]: ...\n def get_registered_model(self, app_label: str, model_name: str) -> Type[Model]: ...\n def get_swappable_settings_name(self, to_string: str) -> Optional[str]: ...\n def set_available_apps(self, available: Iterable[str]) -> None: ...\n def unset_available_apps(self) -> None: ...\n def set_installed_apps(self, installed: Iterable[str]) -> None: ...\n def unset_installed_apps(self) -> None: ...\n def clear_cache(self) -> None: ...\n def lazy_model_operation(self, function: Callable, *model_keys: Any) -> None: ...\n def do_pending_operations(self, model: Type[Model]) -> None: ...\n\napps: Apps\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\apps\registry.pyi
|
registry.pyi
|
Other
| 2,050 | 0.95 | 0.55 | 0.027778 |
react-lib
| 849 |
2024-12-04T05:58:59.808088
|
Apache-2.0
| false |
9926d763c27e5b258dda39dd2de77c6b
|
from .config import AppConfig as AppConfig\n\nfrom .registry import apps as apps\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\apps\__init__.pyi
|
__init__.pyi
|
Other
| 79 | 0.65 | 0 | 0 |
node-utils
| 992 |
2025-04-10T11:18:52.603993
|
MIT
| false |
b60837fbd6193ca4695c1d014606d434
|
"""\nDefault Django settings. Override these with settings in the module pointed to\nby the DJANGO_SETTINGS_MODULE environment variable.\n"""\n\n# This is defined here as a do-nothing function because we can't import\n# django.utils.translation -- that module depends on the settings.\nfrom typing import Any, Dict, List, Optional, Pattern, Protocol, Sequence, Tuple, Union\n\n####################\n# CORE #\n####################\nDEBUG: bool = ...\n\n# Whether the framework should propagate raw exceptions rather than catching\n# them. This is useful under some testing situations and should never be used\n# on a live site.\nDEBUG_PROPAGATE_EXCEPTIONS: bool = ...\n\n# People who get code error notifications.\n# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]\nADMINS: List[Tuple[str, str]] = ...\n\n# List of IP addresses, as strings, that:\n# * See debug comments, when DEBUG is true\n# * Receive x-headers\nINTERNAL_IPS: List[str] = ...\n\n# Hosts/domain names that are valid for this site.\n# "*" matches anything, ".example.com" matches example.com and all subdomains\nALLOWED_HOSTS: List[str] = ...\n\n# Local time zone for this installation. All choices can be found here:\n# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all\n# systems may support all possibilities). When USE_TZ is True, this is\n# interpreted as the default user time zone.\nTIME_ZONE: str = ...\n\n# If you set this to True, Django will use timezone-aware datetimes.\nUSE_TZ: bool = ...\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE: str = ...\n\n# Languages we provide translations for, out of the box.\nLANGUAGES: List[Tuple[str, str]] = ...\n\n# Languages using BiDi (right-to-left) layout\nLANGUAGES_BIDI: List[str] = ...\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N: bool = ...\nLOCALE_PATHS: List[str] = ...\n\n# Settings for language cookie\nLANGUAGE_COOKIE_NAME: str = ...\nLANGUAGE_COOKIE_AGE: Optional[int] = ...\nLANGUAGE_COOKIE_DOMAIN: Optional[str] = ...\nLANGUAGE_COOKIE_PATH: str = ...\n\n# If you set this to True, Django will format dates, numbers and calendars\n# according to user current locale.\nUSE_L10N: bool = ...\n\n# Not-necessarily-technical managers of the site. They get broken link\n# notifications and other various emails.\nMANAGERS = ADMINS\n\n# Default content type and charset to use for all HttpResponse objects, if a\n# MIME type isn't manually specified. These are used to construct the\n# Content-Type header.\nDEFAULT_CONTENT_TYPE: str = ...\nDEFAULT_CHARSET: str = ...\n\n# Encoding of files read from disk (template and initial SQL files).\nFILE_CHARSET: str = ...\n\n# Email address that error messages come from.\nSERVER_EMAIL: str = ...\n\n# Database connection info. If left empty, will default to the dummy backend.\nDATABASES: Dict[str, Dict[str, Any]] = ...\n\n# Classes used to implement DB routing behavior.\nclass Router(Protocol):\n def allow_migrate(self, db, app_label, **hints): ...\n\nDATABASE_ROUTERS: List[Union[str, Router]] = ...\n\n# The email backend to use. 
For possible shortcuts see django.core.mail.\n# The default is to use the SMTP backend.\n# Third-party backends can be specified by providing a Python path\n# to a module that defines an EmailBackend class.\nEMAIL_BACKEND: str = ...\n\n# Host for sending email.\nEMAIL_HOST: str = ...\n\n# Port for sending email.\nEMAIL_PORT: int = ...\n\n# Whether to send SMTP 'Date' header in the local time zone or in UTC.\nEMAIL_USE_LOCALTIME: bool = ...\n\n# Optional SMTP authentication information for EMAIL_HOST.\nEMAIL_HOST_USER: str = ...\nEMAIL_HOST_PASSWORD: str = ...\nEMAIL_USE_TLS: bool = ...\nEMAIL_USE_SSL: bool = ...\nEMAIL_SSL_CERTFILE: Optional[str] = ...\nEMAIL_SSL_KEYFILE: Optional[str] = ...\nEMAIL_TIMEOUT: Optional[int] = ...\n\n# List of strings representing installed apps.\nINSTALLED_APPS: List[str] = ...\n\nTEMPLATES: List[Dict[str, Any]] = ...\n\n# Default form rendering class.\nFORM_RENDERER: str = ...\n\n# Default email address to use for various automated correspondence from\n# the site managers.\nDEFAULT_FROM_EMAIL: str = ...\n\n# Subject-line prefix for email messages send with django.core.mail.mail_admins\n# or ...mail_managers. Make sure to include the trailing space.\nEMAIL_SUBJECT_PREFIX: str = ...\n\n# Whether to append trailing slashes to URLs.\nAPPEND_SLASH: bool = ...\n\n# Whether to prepend the "www." subdomain to URLs that don't have it.\nPREPEND_WWW: bool = ...\n\n# Override the server-derived value of SCRIPT_NAME\nFORCE_SCRIPT_NAME = None\n\n# List of compiled regular expression objects representing User-Agent strings\n# that are not allowed to visit any page, systemwide. Use this for bad\n# robots/crawlers. 
Here are a few examples:\n# import re\n# DISALLOWED_USER_AGENTS = [\n# re.compile(r'^NaverBot.*'),\n# re.compile(r'^EmailSiphon.*'),\n# re.compile(r'^SiteSucker.*'),\n# re.compile(r'^sohu-search'),\n# ]\nDISALLOWED_USER_AGENTS: List[Pattern] = ...\n\nABSOLUTE_URL_OVERRIDES: Dict[str, Any] = ...\n\n# List of compiled regular expression objects representing URLs that need not\n# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:\n# import re\n# IGNORABLE_404_URLS = [\n# re.compile(r'^/apple-touch-icon.*\.png$'),\n# re.compile(r'^/favicon.ico$'),\n# re.compile(r'^/robots.txt$'),\n# re.compile(r'^/phpmyadmin/'),\n# re.compile(r'\.(cgi|php|pl)$'),\n# ]\nIGNORABLE_404_URLS: List[Pattern] = ...\n\n# A secret key for this particular Django installation. Used in secret-key\n# hashing algorithms. Set this in your settings, or Django will complain\n# loudly.\nSECRET_KEY: str = ...\n\n# Default file storage mechanism that holds media.\nDEFAULT_FILE_STORAGE: str = ...\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: "/var/www/example.com/media/"\nMEDIA_ROOT: str = ...\n\n# URL that handles the media served from MEDIA_ROOT.\n# Examples: "http://example.com/media/", "http://media.example.com/"\nMEDIA_URL: str = ...\n\n# Absolute path to the directory static files should be collected to.\n# Example: "/var/www/example.com/static/"\nSTATIC_ROOT: Optional[str] = ...\n\n# URL that handles the static files served from STATIC_ROOT.\n# Example: "http://example.com/static/", "http://static.example.com/"\nSTATIC_URL: Optional[str] = ...\n\n# List of upload handler classes to be applied in order.\nFILE_UPLOAD_HANDLERS: List[str] = ...\n\n# Maximum size, in bytes, of a request before it will be streamed to the\n# file system instead of into memory.\nFILE_UPLOAD_MAX_MEMORY_SIZE: int = ... # i.e. 
2.5 MB\n\n# Maximum size in bytes of request data (excluding file uploads) that will be\n# read before a SuspiciousOperation (RequestDataTooBig) is raised.\nDATA_UPLOAD_MAX_MEMORY_SIZE: int = ... # i.e. 2.5 MB\n\n# Maximum number of GET/POST parameters that will be read before a\n# SuspiciousOperation (TooManyFieldsSent) is raised.\nDATA_UPLOAD_MAX_NUMBER_FIELDS: int = ...\n\n# Directory in which upload streamed files will be temporarily saved. A value of\n# `None` will make Django use the operating system's default temporary directory\n# (i.e. "/tmp" on *nix systems).\nFILE_UPLOAD_TEMP_DIR: Optional[str] = ...\n\n# The numeric mode to set newly-uploaded files to. The value should be a mode\n# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\nFILE_UPLOAD_PERMISSIONS = None\n\n# The numeric mode to assign to newly-created directories, when uploading files.\n# The value should be a mode as you'd pass to os.chmod;\n# see https://docs.python.org/library/os.html#files-and-directories.\nFILE_UPLOAD_DIRECTORY_PERMISSIONS = None\n\n# Python module path where user will place custom format definition.\n# The directory where this setting is pointing should contain subdirectories\n# named as the locales, containing a formats.py file\n# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)\nFORMAT_MODULE_PATH: Optional[str] = ...\n\n# Default formatting for date objects. See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATE_FORMAT: str = ...\n\n# Default formatting for datetime objects. See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATETIME_FORMAT: str = ...\n\n# Default formatting for time objects. 
See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nTIME_FORMAT: str = ...\n\n# Default formatting for date objects when only the year and month are relevant.\n# See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nYEAR_MONTH_FORMAT: str = ...\n\n# Default formatting for date objects when only the month and day are relevant.\n# See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nMONTH_DAY_FORMAT: str = ...\n\n# Default short formatting for date objects. See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nSHORT_DATE_FORMAT: str = ...\n\n# Default short formatting for datetime objects.\n# See all available format strings here:\n# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nSHORT_DATETIME_FORMAT: str = ...\n\n# Default formats to be used when parsing dates from input boxes, in order\n# See all available format string here:\n# https://docs.python.org/library/datetime.html#strftime-behavior\n# * Note that these format strings are different from the ones to display dates\nDATE_INPUT_FORMATS: List[str] = ...\n\n# Default formats to be used when parsing times from input boxes, in order\n# See all available format string here:\n# https://docs.python.org/library/datetime.html#strftime-behavior\n# * Note that these format strings are different from the ones to display dates\nTIME_INPUT_FORMATS: List[str] = ... 
# '14:30:59' # '14:30:59.000200' # '14:30'\n\n# Default formats to be used when parsing dates and times from input boxes,\n# in order\n# See all available format string here:\n# https://docs.python.org/library/datetime.html#strftime-behavior\n# * Note that these format strings are different from the ones to display dates\nDATETIME_INPUT_FORMATS: List[str] = ...\n\n# First day of week, to be used on calendars\n# 0 means Sunday, 1 means Monday...\nFIRST_DAY_OF_WEEK: int = ...\n\n# Decimal separator symbol\nDECIMAL_SEPARATOR: str = ...\n\n# Boolean that sets whether to add thousand separator when formatting numbers\nUSE_THOUSAND_SEPARATOR: bool = ...\n\n# Number of digits that will be together, when splitting them by\n# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...\nNUMBER_GROUPING: int = ...\n\n# Thousand separator symbol\nTHOUSAND_SEPARATOR: str = ...\n\n# The tablespaces to use for each model when not specified otherwise.\nDEFAULT_TABLESPACE: str = ...\nDEFAULT_INDEX_TABLESPACE: str = ...\n\n# Default X-Frame-Options header value\nX_FRAME_OPTIONS: str = ...\n\nUSE_X_FORWARDED_HOST: bool = ...\nUSE_X_FORWARDED_PORT: bool = ...\n\n# The Python dotted path to the WSGI application that Django's internal server\n# (runserver) will use. If `None`, the return value of\n# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same\n# behavior as previous versions of Django. Otherwise this should point to an\n# actual WSGI application object.\nWSGI_APPLICATION: Optional[str] = ...\n\n# If your Django app is behind a proxy that sets a header to specify secure\n# connections, AND that proxy ensures that user-submitted headers with the\n# same name are ignored (so that people can't spoof it), set this value to\n# a tuple of (header_name, header_value). For any requests that come in with\n# that header/value, request.is_secure() will return True.\n# WARNING! Only set this if you fully understand what you're doing. 
Otherwise,\n# you may be opening yourself up to a security risk.\nSECURE_PROXY_SSL_HEADER: Optional[Tuple[str, str]] = ...\n\n##############\n# MIDDLEWARE #\n##############\n\n# List of middleware to use. Order is important; in the request phase, these\n# middleware will be applied in the order given, and in the response\n# phase the middleware will be applied in reverse order.\nMIDDLEWARE: List[str] = ...\n\n############\n# SESSIONS #\n############\n\n# Cache to store session data if using the cache session backend.\nSESSION_CACHE_ALIAS = "default"\n# Cookie name. This can be whatever you want.\nSESSION_COOKIE_NAME = "sessionid"\n# Age of cookie, in seconds (default: 2 weeks).\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2\n# A string like "example.com", or None for standard domain cookie.\nSESSION_COOKIE_DOMAIN: Optional[str] = ...\n# Whether the session cookie should be secure (https:// only).\nSESSION_COOKIE_SECURE = False\n# The path of the session cookie.\nSESSION_COOKIE_PATH = "/"\n# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)\nSESSION_COOKIE_HTTPONLY = True\n# Whether to set the flag restricting cookie leaks on cross-site requests.\n# This can be 'Lax', 'Strict', or None to disable the flag.\nSESSION_COOKIE_SAMESITE: Optional[str] = ...\n# Whether to save the session data on every request.\nSESSION_SAVE_EVERY_REQUEST = False\n# Whether a user's session cookie expires when the Web browser is closed.\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\n# The module to store session data\nSESSION_ENGINE = "django.contrib.sessions.backends.db"\n# Directory to store session files if using the file session module. 
If None,\n# the backend will use a sensible default.\nSESSION_FILE_PATH: Optional[str] = ...\n# class to serialize session data\nSESSION_SERIALIZER = "django.contrib.sessions.serializers.JSONSerializer"\n\n#########\n# CACHE #\n#########\n\n# The cache backends to use.\nCACHES: Dict[str, Dict[str, Any]] = ...\nCACHE_MIDDLEWARE_KEY_PREFIX = ""\nCACHE_MIDDLEWARE_SECONDS = 600\nCACHE_MIDDLEWARE_ALIAS = "default"\n\n##################\n# AUTHENTICATION #\n##################\n\nAUTH_USER_MODEL: str = ...\n\nAUTHENTICATION_BACKENDS: Sequence[str] = ...\n\nLOGIN_URL = "/accounts/login/"\n\nLOGIN_REDIRECT_URL: str = ...\n\nLOGOUT_REDIRECT_URL: Optional[str] = ...\n\n# The number of days a password reset link is valid for\nPASSWORD_RESET_TIMEOUT_DAYS = 3\n\n# the first hasher in this list is the preferred algorithm. any\n# password using different algorithms will be converted automatically\n# upon login\nPASSWORD_HASHERS: List[str] = ...\n\nAUTH_PASSWORD_VALIDATORS: List[Dict[str, str]] = ...\n\n###########\n# SIGNING #\n###########\n\nSIGNING_BACKEND = "django.core.signing.TimestampSigner"\n\n########\n# CSRF #\n########\n\n# Dotted path to callable to be used as view when a request is\n# rejected by the CSRF middleware.\nCSRF_FAILURE_VIEW = "django.views.csrf.csrf_failure"\n\n# Settings for CSRF cookie.\nCSRF_COOKIE_NAME = "csrftoken"\nCSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52\nCSRF_COOKIE_DOMAIN = None\nCSRF_COOKIE_PATH = "/"\nCSRF_COOKIE_SECURE = False\nCSRF_COOKIE_HTTPONLY = False\nCSRF_COOKIE_SAMESITE: Optional[str] = ...\nCSRF_HEADER_NAME = "HTTP_X_CSRFTOKEN"\nCSRF_TRUSTED_ORIGINS: List[str] = ...\nCSRF_USE_SESSIONS = False\n\n############\n# MESSAGES #\n############\n\n# Class to use as messages backend\nMESSAGE_STORAGE = "django.contrib.messages.storage.fallback.FallbackStorage"\n\n# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within\n# django.contrib.messages to avoid imports in this settings file.\n\n###########\n# LOGGING #\n###########\n\n# The 
callable to use to configure logging\nLOGGING_CONFIG = "logging.config.dictConfig"\n\n# Custom logging configuration.\nLOGGING: Dict[str, Any] = ...\n\n# Default exception reporter filter class used in case none has been\n# specifically assigned to the HttpRequest instance.\nDEFAULT_EXCEPTION_REPORTER_FILTER = "django.views.debug.SafeExceptionReporterFilter"\n\n###########\n# TESTING #\n###########\n\n# The name of the class to use to run the test suite\nTEST_RUNNER = "django.test.runner.DiscoverRunner"\n\n# Apps that don't need to be serialized at test database creation time\n# (only apps with migrations are to start with)\nTEST_NON_SERIALIZED_APPS: List[str] = ...\n\n############\n# FIXTURES #\n############\n\n# The list of directories to search for fixtures\nFIXTURE_DIRS: List[str] = ...\n\n###############\n# STATICFILES #\n###############\n\n# A list of locations of additional static files\nSTATICFILES_DIRS: List[str] = ...\n\n# The default file storage backend used during the build process\nSTATICFILES_STORAGE: str = ...\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS: List[str] = ...\n\n##############\n# MIGRATIONS #\n##############\n\n# Migration module overrides for apps, by app label.\nMIGRATION_MODULES: Dict[str, str] = ...\n\n#################\n# SYSTEM CHECKS #\n#################\n\n# List of all issues generated by system checks that should be silenced. Light\n# issues like warnings, infos or debugs will not generate a message. Silencing\n# serious issues like errors and criticals does not result in hiding the\n# message, but Django will not stop you from e.g. 
running server.\nSILENCED_SYSTEM_CHECKS: List[str] = ...\n\n#######################\n# SECURITY MIDDLEWARE #\n#######################\nSECURE_BROWSER_XSS_FILTER = False\nSECURE_CONTENT_TYPE_NOSNIFF = False\nSECURE_HSTS_INCLUDE_SUBDOMAINS = False\nSECURE_HSTS_PRELOAD = False\nSECURE_HSTS_SECONDS = 0\nSECURE_REDIRECT_EXEMPT: List[str] = ...\nSECURE_SSL_HOST = None\nSECURE_SSL_REDIRECT = False\n
|
.venv\Lib\site-packages\jedi\third_party\django-stubs\django-stubs\conf\global_settings.pyi
|
global_settings.pyi
|
Other
| 17,462 | 0.95 | 0.077381 | 0.625 |
vue-tools
| 699 |
2025-05-06T07:49:16.950063
|
BSD-3-Clause
| false |
732ba201db874d6de23f2c192f2f8b02
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.