content
stringlengths 1
103k
⌀ | path
stringlengths 8
216
| filename
stringlengths 2
179
| language
stringclasses 15
values | size_bytes
int64 2
189k
| quality_score
float64 0.5
0.95
| complexity
float64 0
1
| documentation_ratio
float64 0
1
| repository
stringclasses 5
values | stars
int64 0
1k
| created_date
stringdate 2023-07-10 19:21:08
2025-07-09 19:11:45
| license
stringclasses 4
values | is_test
bool 2
classes | file_hash
stringlengths 32
32
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
/* ****************************************************************************\n *\n * Copyright (c) Microsoft Corporation.\n *\n * This source code is subject to terms and conditions of the Apache License, Version 2.0. A\n * copy of the license can be found in the License.html file at the root of this distribution. If\n * you cannot locate the Apache License, Version 2.0, please send an email to\n * vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound\n * by the terms of the Apache License, Version 2.0.\n *\n * You must not remove this notice, or any other, from this software.\n *\n * ***************************************************************************/\n\n// stdafx.h : include file for standard system include files,\n// or project specific include files that are used frequently, but\n// are changed infrequently\n//\n\n#pragma once\n\n#include "targetver.h"\n\n#include <cstdint>\n#include <fstream>\n#include <string>\n#include <unordered_set>\n#include <unordered_map>\n\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n#include <psapi.h>\n#include <strsafe.h>\n#include <tlhelp32.h>\n#include <winsock.h>\n#include <winternl.h>\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\windows\stdafx.h
|
stdafx.h
|
C
| 1,198 | 0.8 | 0.027778 | 1 |
node-utils
| 812 |
2023-10-07T11:08:47.127651
|
GPL-3.0
| false |
c897ccc29fed72ad25f1d13049e44491
|
/* ****************************************************************************\n *\n * Copyright (c) Microsoft Corporation. \n *\n * This source code is subject to terms and conditions of the Apache License, Version 2.0. A \n * copy of the license can be found in the License.html file at the root of this distribution. If \n * you cannot locate the Apache License, Version 2.0, please send an email to \n * vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound \n * by the terms of the Apache License, Version 2.0.\n *\n * You must not remove this notice, or any other, from this software.\n *\n * ***************************************************************************/\n\n#pragma once\n\n// Including SDKDDKVer.h defines the highest available Windows platform.\n\n// If you wish to build your application for a previous Windows platform, include WinSDKVer.h and\n// set the _WIN32_WINNT macro to the platform you wish to support before including SDKDDKVer.h.\n\n#include <SDKDDKVer.h>\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\windows\targetver.h
|
targetver.h
|
C
| 1,035 | 0.8 | 0.045455 | 1 |
node-utils
| 591 |
2024-02-18T02:22:22.863751
|
GPL-3.0
| false |
bfdd9164f498446198f372a274a6d589
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\add_code_to_python_process.cpython-313.pyc
|
add_code_to_python_process.cpython-313.pyc
|
Other
| 21,931 | 0.95 | 0.045775 | 0.023346 |
awesome-app
| 133 |
2024-11-03T11:26:20.587716
|
GPL-3.0
| false |
fec42c5f69bd385ca7e1130c3c62144b
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\attach_pydevd.cpython-313.pyc
|
attach_pydevd.cpython-313.pyc
|
Other
| 3,414 | 0.85 | 0 | 0 |
vue-tools
| 4 |
2024-04-16T06:39:31.585385
|
BSD-3-Clause
| false |
ce43ec30fd94a0d8fa78be1b9574f1d8
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\attach_script.cpython-313.pyc
|
attach_script.cpython-313.pyc
|
Other
| 6,761 | 0.95 | 0 | 0 |
node-utils
| 407 |
2024-11-11T13:21:03.092028
|
Apache-2.0
| false |
599f523668bdc1e52159bec9d0fe94b6
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\_always_live_program.cpython-313.pyc
|
_always_live_program.cpython-313.pyc
|
Other
| 1,750 | 0.8 | 0 | 0 |
react-lib
| 287 |
2025-04-30T11:03:39.035345
|
BSD-3-Clause
| false |
9e30655bfd05831e8b96e2423f9708ec
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\_check.cpython-313.pyc
|
_check.cpython-313.pyc
|
Other
| 405 | 0.7 | 0 | 0 |
node-utils
| 171 |
2023-11-20T15:10:55.107258
|
Apache-2.0
| false |
f8c0bc38e6e5891323ff0ffee38fb28f
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\_test_attach_to_process.cpython-313.pyc
|
_test_attach_to_process.cpython-313.pyc
|
Other
| 811 | 0.8 | 0 | 0 |
vue-tools
| 310 |
2024-11-05T20:09:45.196930
|
BSD-3-Clause
| true |
3900dedcffbc4e20e49c9340f8c08483
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_attach_to_process\__pycache__\_test_attach_to_process_linux.cpython-313.pyc
|
_test_attach_to_process_linux.cpython-313.pyc
|
Other
| 3,333 | 0.8 | 0.020833 | 0.317073 |
node-utils
| 260 |
2024-03-28T04:47:48.025462
|
Apache-2.0
| true |
b24039c855241d3ee13db26caa15720d
|
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint\nfrom _pydevd_bundle.pydevd_api import PyDevdAPI\nimport bisect\nfrom _pydev_bundle import pydev_log\n\n\nclass LineBreakpointWithLazyValidation(LineBreakpoint):\n def __init__(self, *args, **kwargs):\n LineBreakpoint.__init__(self, *args, **kwargs)\n # This is the _AddBreakpointResult that'll be modified (and then re-sent on the\n # on_changed_breakpoint_state).\n self.add_breakpoint_result = None\n\n # The signature for the callback should be:\n # on_changed_breakpoint_state(breakpoint_id: int, add_breakpoint_result: _AddBreakpointResult)\n self.on_changed_breakpoint_state = None\n\n # When its state is checked (in which case it'd call on_changed_breakpoint_state if the\n # state changed), we store a cache key in 'verified_cache_key' -- in case it changes\n # we'd need to re-verify it (for instance, the template could have changed on disk).\n self.verified_cache_key = None\n\n\nclass ValidationInfo(object):\n def __init__(self):\n self._canonical_normalized_filename_to_last_template_lines = {}\n\n def _collect_valid_lines_in_template(self, template):\n # We cache the lines in the template itself. 
Note that among requests the\n # template may be a different instance (because the template contents could be\n # changed on disk), but this may still be called multiple times during the\n # same render session, so, caching is interesting.\n lines_cache = getattr(template, "__pydevd_lines_cache__", None)\n if lines_cache is not None:\n lines, sorted_lines = lines_cache\n return lines, sorted_lines\n\n lines = self._collect_valid_lines_in_template_uncached(template)\n\n lines = frozenset(lines)\n sorted_lines = tuple(sorted(lines))\n template.__pydevd_lines_cache__ = lines, sorted_lines\n return lines, sorted_lines\n\n def _collect_valid_lines_in_template_uncached(self, template):\n raise NotImplementedError()\n\n def verify_breakpoints(self, py_db, canonical_normalized_filename, template_breakpoints_for_file, template):\n """\n This function should be called whenever a rendering is detected.\n\n :param str canonical_normalized_filename:\n :param dict[int:LineBreakpointWithLazyValidation] template_breakpoints_for_file:\n """\n valid_lines_frozenset, sorted_lines = self._collect_valid_lines_in_template(template)\n\n self._canonical_normalized_filename_to_last_template_lines[canonical_normalized_filename] = valid_lines_frozenset, sorted_lines\n self._verify_breakpoints_with_lines_collected(\n py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines\n )\n\n def verify_breakpoints_from_template_cached_lines(self, py_db, canonical_normalized_filename, template_breakpoints_for_file):\n """\n This is used when the lines are already available (if just the template is available,\n `verify_breakpoints` should be used instead).\n """\n cached = self._canonical_normalized_filename_to_last_template_lines.get(canonical_normalized_filename)\n if cached is not None:\n valid_lines_frozenset, sorted_lines = cached\n self._verify_breakpoints_with_lines_collected(\n py_db, canonical_normalized_filename, template_breakpoints_for_file, 
valid_lines_frozenset, sorted_lines\n )\n\n def _verify_breakpoints_with_lines_collected(\n self, py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines\n ):\n for line, template_bp in list(template_breakpoints_for_file.items()): # Note: iterate in a copy (we may mutate it).\n if template_bp.verified_cache_key != valid_lines_frozenset:\n template_bp.verified_cache_key = valid_lines_frozenset\n valid = line in valid_lines_frozenset\n\n if not valid:\n new_line = -1\n if sorted_lines:\n # Adjust to the first preceding valid line.\n idx = bisect.bisect_left(sorted_lines, line)\n if idx > 0:\n new_line = sorted_lines[idx - 1]\n\n if new_line >= 0 and new_line not in template_breakpoints_for_file:\n # We just add it if found and if there's no existing breakpoint at that\n # location.\n if (\n template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_NO_ERROR\n and template_bp.add_breakpoint_result.translated_line != new_line\n ):\n pydev_log.debug(\n "Template breakpoint in %s in line: %s moved to line: %s", canonical_normalized_filename, line, new_line\n )\n template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_NO_ERROR\n template_bp.add_breakpoint_result.translated_line = new_line\n\n # Add it to a new line.\n template_breakpoints_for_file.pop(line, None)\n template_breakpoints_for_file[new_line] = template_bp\n template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)\n else:\n if template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_INVALID_LINE:\n pydev_log.debug(\n "Template breakpoint in %s in line: %s invalid (valid lines: %s)",\n canonical_normalized_filename,\n line,\n valid_lines_frozenset,\n )\n template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_INVALID_LINE\n template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)\n else:\n if 
template_bp.add_breakpoint_result.error_code != PyDevdAPI.ADD_BREAKPOINT_NO_ERROR:\n template_bp.add_breakpoint_result.error_code = PyDevdAPI.ADD_BREAKPOINT_NO_ERROR\n template_bp.on_changed_breakpoint_state(template_bp.breakpoint_id, template_bp.add_breakpoint_result)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\pydevd_line_validation.py
|
pydevd_line_validation.py
|
Python
| 6,774 | 0.95 | 0.225 | 0.147059 |
react-lib
| 634 |
2025-06-13T10:14:30.058599
|
Apache-2.0
| false |
a59450373842ef773697294ca47a9136
|
import pkgutil\n\n__path__ = pkgutil.extend_path(__path__, __name__)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\__init__.py
|
__init__.py
|
Python
| 70 | 0.65 | 0 | 0 |
vue-tools
| 842 |
2025-06-22T15:17:57.816707
|
MIT
| false |
fc34a85235835f65e4709eb3988c6602
|
Extensions allow extending the debugger without modifying the debugger code. This is implemented with explicit namespace\npackages.\n\nTo implement your own extension:\n\n1. Ensure that the root folder of your extension is in sys.path (add it to PYTHONPATH) \n2. Ensure that your module follows the directory structure below\n3. The ``__init__.py`` files inside the pydevd_plugin and extension folder must contain the preamble below,\nand nothing else.\nPreamble: \n```python\ntry:\n __import__('pkg_resources').declare_namespace(__name__)\nexcept ImportError:\n import pkgutil\n __path__ = pkgutil.extend_path(__path__, __name__)\n```\n4. Your plugin name inside the extensions folder must start with `"pydevd_plugin"`\n5. Implement one or more of the abstract base classes defined in `_pydevd_bundle.pydevd_extension_api`. This can be done\nby either inheriting from them or registering with the abstract base class.\n\n* Directory structure:\n```\n|-- root_directory-> must be on python path\n| |-- pydevd_plugins\n| | |-- __init__.py -> must contain preamble\n| | |-- extensions\n| | | |-- __init__.py -> must contain preamble\n| | | |-- pydevd_plugin_plugin_name.py\n```
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\README.md
|
README.md
|
Markdown
| 1,212 | 0.85 | 0.068966 | 0.037037 |
react-lib
| 439 |
2024-05-09T15:51:45.253071
|
MIT
| false |
8e1ab88b4cea444d701ba697da22fd21
|
import pkgutil\n\n__path__ = pkgutil.extend_path(__path__, __name__)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\__init__.py
|
__init__.py
|
Python
| 70 | 0.65 | 0 | 0 |
react-lib
| 966 |
2024-07-28T16:01:48.463431
|
Apache-2.0
| false |
fc34a85235835f65e4709eb3988c6602
|
import sys\n\n\ndef find_cached_module(mod_name):\n return sys.modules.get(mod_name, None)\n\n\ndef find_mod_attr(mod_name, attr):\n mod = find_cached_module(mod_name)\n if mod is None:\n return None\n return getattr(mod, attr, None)\n\n\ndef find_class_name(val):\n class_name = str(val.__class__)\n if class_name.find(".") != -1:\n class_name = class_name.split(".")[-1]\n\n elif class_name.find("'") != -1: # does not have '.' (could be something like <type 'int'>)\n class_name = class_name[class_name.index("'") + 1 :]\n\n if class_name.endswith("'>"):\n class_name = class_name[:-2]\n\n return class_name\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\pydevd_helpers.py
|
pydevd_helpers.py
|
Python
| 668 | 0.95 | 0.230769 | 0 |
node-utils
| 158 |
2025-03-15T02:47:20.360260
|
MIT
| false |
d6392a7ffe95c9231f790d3de6b839ec
|
from _pydevd_bundle.pydevd_extension_api import StrPresentationProvider\nfrom .pydevd_helpers import find_mod_attr, find_class_name\n\n\nclass DjangoFormStr(object):\n def can_provide(self, type_object, type_name):\n form_class = find_mod_attr("django.forms", "Form")\n return form_class is not None and issubclass(type_object, form_class)\n\n def get_str(self, val):\n return "%s: %r" % (find_class_name(val), val)\n\n\nimport sys\n\nif not sys.platform.startswith("java"):\n StrPresentationProvider.register(DjangoFormStr)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\pydevd_plugins_django_form_str.py
|
pydevd_plugins_django_form_str.py
|
Python
| 556 | 0.85 | 0.235294 | 0 |
python-kit
| 103 |
2024-02-23T15:44:53.542971
|
Apache-2.0
| false |
b6496a212afe363503e18e81f0e414f6
|
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider\nfrom _pydevd_bundle.pydevd_resolver import defaultResolver\nfrom .pydevd_helpers import find_mod_attr\nfrom _pydevd_bundle import pydevd_constants\n\nTOO_LARGE_MSG = "Maximum number of items (%s) reached. To show more items customize the value of the PYDEVD_CONTAINER_NUMPY_MAX_ITEMS environment variable."\nTOO_LARGE_ATTR = "Unable to handle:"\n\n\nclass NdArrayItemsContainer(object):\n pass\n\n\nclass NDArrayTypeResolveProvider(object):\n """\n This resolves a numpy ndarray returning some metadata about the NDArray\n """\n\n def can_provide(self, type_object, type_name):\n nd_array = find_mod_attr("numpy", "ndarray")\n return nd_array is not None and issubclass(type_object, nd_array)\n\n def is_numeric(self, obj):\n if not hasattr(obj, "dtype"):\n return False\n return obj.dtype.kind in "biufc"\n\n def resolve(self, obj, attribute):\n if attribute == "__internals__":\n return defaultResolver.get_dictionary(obj)\n if attribute == "min":\n if self.is_numeric(obj) and obj.size > 0:\n return obj.min()\n else:\n return None\n if attribute == "max":\n if self.is_numeric(obj) and obj.size > 0:\n return obj.max()\n else:\n return None\n if attribute == "shape":\n return obj.shape\n if attribute == "dtype":\n return obj.dtype\n if attribute == "size":\n return obj.size\n if attribute.startswith("["):\n container = NdArrayItemsContainer()\n i = 0\n format_str = "%0" + str(int(len(str(len(obj))))) + "d"\n for item in obj:\n setattr(container, format_str % i, item)\n i += 1\n if i >= pydevd_constants.PYDEVD_CONTAINER_NUMPY_MAX_ITEMS:\n setattr(container, TOO_LARGE_ATTR, TOO_LARGE_MSG % (pydevd_constants.PYDEVD_CONTAINER_NUMPY_MAX_ITEMS,))\n break\n return container\n return None\n\n def get_dictionary(self, obj):\n ret = dict()\n ret["__internals__"] = defaultResolver.get_dictionary(obj)\n if obj.size > 1024 * 1024:\n ret["min"] = "ndarray too big, calculating min would slow down debugging"\n ret["max"] = 
"ndarray too big, calculating max would slow down debugging"\n elif obj.size == 0:\n ret["min"] = "array is empty"\n ret["max"] = "array is empty"\n else:\n if self.is_numeric(obj):\n ret["min"] = obj.min()\n ret["max"] = obj.max()\n else:\n ret["min"] = "not a numeric object"\n ret["max"] = "not a numeric object"\n ret["shape"] = obj.shape\n ret["dtype"] = obj.dtype\n ret["size"] = obj.size\n try:\n ret["[0:%s] " % (len(obj))] = list(obj[0 : pydevd_constants.PYDEVD_CONTAINER_NUMPY_MAX_ITEMS])\n except:\n # This may not work depending on the array shape.\n pass\n return ret\n\n\nimport sys\n\nif not sys.platform.startswith("java"):\n TypeResolveProvider.register(NDArrayTypeResolveProvider)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\pydevd_plugin_numpy_types.py
|
pydevd_plugin_numpy_types.py
|
Python
| 3,340 | 0.95 | 0.244444 | 0.012821 |
vue-tools
| 242 |
2023-09-25T01:22:40.176878
|
GPL-3.0
| false |
9daba109d9aa595172e9bc8a0e166453
|
import sys\n\nfrom _pydevd_bundle.pydevd_constants import PANDAS_MAX_ROWS, PANDAS_MAX_COLS, PANDAS_MAX_COLWIDTH\nfrom _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider\nfrom _pydevd_bundle.pydevd_resolver import inspect, MethodWrapperType\nfrom _pydevd_bundle.pydevd_utils import Timer\n\nfrom .pydevd_helpers import find_mod_attr\nfrom contextlib import contextmanager\n\n\ndef _get_dictionary(obj, replacements):\n ret = dict()\n cls = obj.__class__\n for attr_name in dir(obj):\n # This is interesting but it actually hides too much info from the dataframe.\n # attr_type_in_cls = type(getattr(cls, attr_name, None))\n # if attr_type_in_cls == property:\n # ret[attr_name] = '<property (not computed)>'\n # continue\n\n timer = Timer()\n try:\n replacement = replacements.get(attr_name)\n if replacement is not None:\n ret[attr_name] = replacement\n continue\n\n attr_value = getattr(obj, attr_name, "<unable to get>")\n if inspect.isroutine(attr_value) or isinstance(attr_value, MethodWrapperType):\n continue\n ret[attr_name] = attr_value\n except Exception as e:\n ret[attr_name] = "<error getting: %s>" % (e,)\n finally:\n timer.report_if_getting_attr_slow(cls, attr_name)\n\n return ret\n\n\n@contextmanager\ndef customize_pandas_options():\n # The default repr depends on the settings of:\n #\n # pandas.set_option('display.max_columns', None)\n # pandas.set_option('display.max_rows', None)\n #\n # which can make the repr **very** slow on some cases, so, we customize pandas to have\n # smaller values if the current values are too big.\n custom_options = []\n\n from pandas import get_option\n\n max_rows = get_option("display.max_rows")\n max_cols = get_option("display.max_columns")\n max_colwidth = get_option("display.max_colwidth")\n\n if max_rows is None or max_rows > PANDAS_MAX_ROWS:\n custom_options.append("display.max_rows")\n custom_options.append(PANDAS_MAX_ROWS)\n\n if max_cols is None or max_cols > PANDAS_MAX_COLS:\n 
custom_options.append("display.max_columns")\n custom_options.append(PANDAS_MAX_COLS)\n\n if max_colwidth is None or max_colwidth > PANDAS_MAX_COLWIDTH:\n custom_options.append("display.max_colwidth")\n custom_options.append(PANDAS_MAX_COLWIDTH)\n\n if custom_options:\n from pandas import option_context\n\n with option_context(*custom_options):\n yield\n else:\n yield\n\n\nclass PandasDataFrameTypeResolveProvider(object):\n def can_provide(self, type_object, type_name):\n data_frame_class = find_mod_attr("pandas.core.frame", "DataFrame")\n return data_frame_class is not None and issubclass(type_object, data_frame_class)\n\n def resolve(self, obj, attribute):\n return getattr(obj, attribute)\n\n def get_dictionary(self, obj):\n replacements = {\n # This actually calls: DataFrame.transpose(), which can be expensive, so,\n # let's just add some string representation for it.\n "T": "<transposed dataframe -- debugger:skipped eval>",\n # This creates a whole new dict{index: Series) for each column. Doing a\n # subsequent repr() from this dict can be very slow, so, don't return it.\n "_series": "<dict[index:Series] -- debugger:skipped eval>",\n "style": "<pandas.io.formats.style.Styler -- debugger: skipped eval>",\n }\n return _get_dictionary(obj, replacements)\n\n def get_str_in_context(self, df, context: str):\n """\n :param context:\n This is the context in which the variable is being requested. 
Valid values:\n "watch",\n "repl",\n "hover",\n "clipboard"\n """\n if context in ("repl", "clipboard"):\n return repr(df)\n return self.get_str(df)\n\n def get_str(self, df):\n with customize_pandas_options():\n return repr(df)\n\n\nclass PandasSeriesTypeResolveProvider(object):\n def can_provide(self, type_object, type_name):\n series_class = find_mod_attr("pandas.core.series", "Series")\n return series_class is not None and issubclass(type_object, series_class)\n\n def resolve(self, obj, attribute):\n return getattr(obj, attribute)\n\n def get_dictionary(self, obj):\n replacements = {\n # This actually calls: DataFrame.transpose(), which can be expensive, so,\n # let's just add some string representation for it.\n "T": "<transposed dataframe -- debugger:skipped eval>",\n # This creates a whole new dict{index: Series) for each column. Doing a\n # subsequent repr() from this dict can be very slow, so, don't return it.\n "_series": "<dict[index:Series] -- debugger:skipped eval>",\n "style": "<pandas.io.formats.style.Styler -- debugger: skipped eval>",\n }\n return _get_dictionary(obj, replacements)\n\n def get_str_in_context(self, df, context: str):\n """\n :param context:\n This is the context in which the variable is being requested. 
Valid values:\n "watch",\n "repl",\n "hover",\n "clipboard"\n """\n if context in ("repl", "clipboard"):\n return repr(df)\n return self.get_str(df)\n\n def get_str(self, series):\n with customize_pandas_options():\n return repr(series)\n\n\nclass PandasStylerTypeResolveProvider(object):\n def can_provide(self, type_object, type_name):\n series_class = find_mod_attr("pandas.io.formats.style", "Styler")\n return series_class is not None and issubclass(type_object, series_class)\n\n def resolve(self, obj, attribute):\n return getattr(obj, attribute)\n\n def get_dictionary(self, obj):\n replacements = {\n "data": "<Styler data -- debugger:skipped eval>",\n "__dict__": "<dict -- debugger: skipped eval>",\n }\n return _get_dictionary(obj, replacements)\n\n\nif not sys.platform.startswith("java"):\n TypeResolveProvider.register(PandasDataFrameTypeResolveProvider)\n StrPresentationProvider.register(PandasDataFrameTypeResolveProvider)\n\n TypeResolveProvider.register(PandasSeriesTypeResolveProvider)\n StrPresentationProvider.register(PandasSeriesTypeResolveProvider)\n\n TypeResolveProvider.register(PandasStylerTypeResolveProvider)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\pydevd_plugin_pandas_types.py
|
pydevd_plugin_pandas_types.py
|
Python
| 6,753 | 0.95 | 0.196629 | 0.140845 |
node-utils
| 658 |
2024-11-16T20:01:24.550568
|
BSD-3-Clause
| false |
6cd6962a5d32057ac753b992d2ff675a
|
import pkgutil\n\n__path__ = pkgutil.extend_path(__path__, __name__)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__init__.py
|
__init__.py
|
Python
| 70 | 0.65 | 0 | 0 |
python-kit
| 135 |
2024-12-31T12:05:14.381872
|
GPL-3.0
| false |
fc34a85235835f65e4709eb3988c6602
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__pycache__\pydevd_helpers.cpython-313.pyc
|
pydevd_helpers.cpython-313.pyc
|
Other
| 1,278 | 0.8 | 0 | 0 |
python-kit
| 62 |
2024-07-20T10:06:38.387048
|
MIT
| false |
868363c471cd0cb022c941ed1d0c23f7
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__pycache__\pydevd_plugins_django_form_str.cpython-313.pyc
|
pydevd_plugins_django_form_str.cpython-313.pyc
|
Other
| 1,350 | 0.8 | 0 | 0 |
python-kit
| 479 |
2025-06-18T09:00:51.116324
|
GPL-3.0
| false |
3a1b13a5285c1bbc4ee04ba06ba6b2b0
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__pycache__\pydevd_plugin_numpy_types.cpython-313.pyc
|
pydevd_plugin_numpy_types.cpython-313.pyc
|
Other
| 4,769 | 0.8 | 0 | 0 |
python-kit
| 58 |
2024-08-01T16:41:21.223192
|
GPL-3.0
| false |
983a7894fc4e2a3738010bbae05b5e40
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__pycache__\pydevd_plugin_pandas_types.cpython-313.pyc
|
pydevd_plugin_pandas_types.cpython-313.pyc
|
Other
| 7,537 | 0.8 | 0 | 0 |
awesome-app
| 113 |
2023-07-22T14:52:54.557891
|
Apache-2.0
| false |
5f0196429534b247c360ea3132d4d0f2
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\types\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 342 | 0.7 | 0 | 0 |
vue-tools
| 30 |
2025-02-18T04:25:15.556198
|
Apache-2.0
| false |
0b08a2d01558b8d0839160f53e9ab591
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\extensions\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 336 | 0.7 | 0 | 0 |
vue-tools
| 99 |
2025-04-20T00:25:23.647287
|
Apache-2.0
| false |
767c319669d0f1ac6360784bf091ad55
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\__pycache__\django_debug.cpython-313.pyc
|
django_debug.cpython-313.pyc
|
Other
| 24,848 | 0.95 | 0.006536 | 0 |
react-lib
| 449 |
2025-07-02T11:20:01.894107
|
BSD-3-Clause
| false |
c2ebabae6b2e5a4359bc3b9266d046f6
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\__pycache__\jinja2_debug.cpython-313.pyc
|
jinja2_debug.cpython-313.pyc
|
Other
| 20,359 | 0.95 | 0 | 0.010204 |
vue-tools
| 292 |
2025-02-27T17:11:25.958014
|
GPL-3.0
| false |
f82a22a62a59f45b2b8465ff1a464165
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\__pycache__\pydevd_line_validation.cpython-313.pyc
|
pydevd_line_validation.cpython-313.pyc
|
Other
| 5,698 | 0.95 | 0.044444 | 0 |
awesome-app
| 952 |
2024-08-08T10:54:22.437362
|
Apache-2.0
| false |
bc384ad38026b3558a4f08277851a8a4
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydevd_plugins\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 325 | 0.7 | 0 | 0 |
python-kit
| 158 |
2023-07-19T10:34:12.964506
|
BSD-3-Clause
| false |
bb32a2f00d6ae16d7eeb34210add0ca6
|
# coding: utf-8\n"""\nGLUT Inputhook support functions\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# GLUT is quite an old library and it is difficult to ensure proper\n# integration within IPython since original GLUT does not allow to handle\n# events one by one. Instead, it requires for the mainloop to be entered\n# and never returned (there is not even a function to exit he\n# mainloop). Fortunately, there are alternatives such as freeglut\n# (available for linux and windows) and the OSX implementation gives\n# access to a glutCheckLoop() function that blocks itself until a new\n# event is received. This means we have to setup the idle callback to\n# ensure we got at least one event that will unblock the function.\n#\n# Furthermore, it is not possible to install these handlers without a window\n# being first created. We choose to make this window invisible. This means that\n# display mode options are set at this level and user won't be able to change\n# them later without modifying the code. 
This should probably be made available\n# via IPython options system.\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport sys\nfrom _pydev_bundle._pydev_saved_modules import time\nimport signal\nimport OpenGL.GLUT as glut # @UnresolvedImport\nimport OpenGL.platform as platform # @UnresolvedImport\nfrom timeit import default_timer as clock\nfrom pydev_ipython.inputhook import stdin_ready\n\n# -----------------------------------------------------------------------------\n# Constants\n# -----------------------------------------------------------------------------\n\n# Frame per second : 60\n# Should probably be an IPython option\nglut_fps = 60\n\n# Display mode : double buffeed + rgba + depth\n# Should probably be an IPython option\nglut_display_mode = glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH\n\nglutMainLoopEvent = None\nif sys.platform == "darwin":\n try:\n glutCheckLoop = platform.createBaseFunction(\n "glutCheckLoop",\n dll=platform.GLUT,\n resultType=None,\n argTypes=[],\n doc="glutCheckLoop( ) -> None",\n argNames=(),\n )\n except AttributeError:\n raise RuntimeError("""Your glut implementation does not allow interactive sessions""" """Consider installing freeglut.""")\n glutMainLoopEvent = glutCheckLoop\nelif glut.HAVE_FREEGLUT:\n glutMainLoopEvent = glut.glutMainLoopEvent\nelse:\n raise RuntimeError("""Your glut implementation does not allow interactive sessions. 
""" """Consider installing freeglut.""")\n\n# -----------------------------------------------------------------------------\n# Callback functions\n# -----------------------------------------------------------------------------\n\n\ndef glut_display():\n # Dummy display function\n pass\n\n\ndef glut_idle():\n # Dummy idle function\n pass\n\n\ndef glut_close():\n # Close function only hides the current window\n glut.glutHideWindow()\n glutMainLoopEvent()\n\n\ndef glut_int_handler(signum, frame):\n # Catch sigint and print the defautl message\n signal.signal(signal.SIGINT, signal.default_int_handler)\n print("\nKeyboardInterrupt")\n # Need to reprint the prompt at this stage\n\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\ndef inputhook_glut():\n """Run the pyglet event loop by processing pending events only.\n\n This keeps processing pending events until stdin is ready. After\n processing all pending events, a call to time.sleep is inserted. This is\n needed, otherwise, CPU usage is at 100%. This sleep time should be tuned\n though for best performance.\n """\n # We need to protect against a user pressing Control-C when IPython is\n # idle and this is running. We trap KeyboardInterrupt and pass.\n\n signal.signal(signal.SIGINT, glut_int_handler)\n\n try:\n t = clock()\n\n # Make sure the default window is set after a window has been closed\n if glut.glutGetWindow() == 0:\n glut.glutSetWindow(1)\n glutMainLoopEvent()\n return 0\n\n while not stdin_ready():\n glutMainLoopEvent()\n # We need to sleep at this point to keep the idle CPU load\n # low. However, if sleep to long, GUI response is poor. As\n # a compromise, we watch how often GUI events are being processed\n # and switch between a short and long sleep time. 
Here are some\n # stats useful in helping to tune this.\n # time CPU load\n # 0.001 13%\n # 0.005 3%\n # 0.01 1.5%\n # 0.05 0.5%\n used_time = clock() - t\n if used_time > 10.0:\n # print 'Sleep for 1 s' # dbg\n time.sleep(1.0)\n elif used_time > 0.1:\n # Few GUI events coming in, so we can sleep longer\n # print 'Sleep for 0.05 s' # dbg\n time.sleep(0.05)\n else:\n # Many GUI events coming in, so sleep only very little\n time.sleep(0.001)\n except KeyboardInterrupt:\n pass\n return 0\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhookglut.py
|
inputhookglut.py
|
Python
| 5,769 | 0.95 | 0.16 | 0.472441 |
node-utils
| 171 |
2024-06-07T05:36:52.798755
|
MIT
| false |
7814eeb6aff61b2c32b48dd8ee171b63
|
# encoding: utf-8\n"""\nEnable pygtk to be used interacive by setting PyOS_InputHook.\n\nAuthors: Brian Granger\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nimport gtk, gobject # @UnresolvedImport\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef _main_quit(*args, **kwargs):\n gtk.main_quit()\n return False\n\n\ndef create_inputhook_gtk(stdin_file):\n def inputhook_gtk():\n gobject.io_add_watch(stdin_file, gobject.IO_IN, _main_quit)\n gtk.main()\n return 0\n\n return inputhook_gtk\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhookgtk.py
|
inputhookgtk.py
|
Python
| 1,151 | 0.95 | 0.081081 | 0.481481 |
react-lib
| 380 |
2024-10-14T19:49:56.964664
|
Apache-2.0
| false |
24f804c5fe358afa0e9b2e20ceb4eba8
|
# encoding: utf-8\n"""\nEnable Gtk3 to be used interacive by IPython.\n\nAuthors: Thomi Richards\n"""\n# -----------------------------------------------------------------------------\n# Copyright (c) 2012, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nfrom gi.repository import Gtk, GLib # @UnresolvedImport\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef _main_quit(*args, **kwargs):\n Gtk.main_quit()\n return False\n\n\ndef create_inputhook_gtk3(stdin_file):\n def inputhook_gtk3():\n GLib.io_add_watch(stdin_file, GLib.IO_IN, _main_quit)\n Gtk.main()\n return 0\n\n return inputhook_gtk3\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhookgtk3.py
|
inputhookgtk3.py
|
Python
| 1,149 | 0.95 | 0.081081 | 0.5 |
react-lib
| 100 |
2024-07-23T10:55:24.948128
|
BSD-3-Clause
| false |
b2142c5513c633cc4c1d64faa54483fb
|
# encoding: utf-8\n"""\nEnable pyglet to be used interacive by setting PyOS_InputHook.\n\nAuthors\n-------\n\n* Nicolas P. Rougier\n* Fernando Perez\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nimport os\nimport sys\nfrom _pydev_bundle._pydev_saved_modules import time\nfrom timeit import default_timer as clock\nimport pyglet # @UnresolvedImport\nfrom pydev_ipython.inputhook import stdin_ready\n\n\n# On linux only, window.flip() has a bug that causes an AttributeError on\n# window close. For details, see:\n# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e\n\nif sys.platform.startswith("linux"):\n\n def flip(window):\n try:\n window.flip()\n except AttributeError:\n pass\nelse:\n\n def flip(window):\n window.flip()\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef inputhook_pyglet():\n """Run the pyglet event loop by processing pending events only.\n\n This keeps processing pending events until stdin is ready. After\n processing all pending events, a call to time.sleep is inserted. This is\n needed, otherwise, CPU usage is at 100%. This sleep time should be tuned\n though for best performance.\n """\n # We need to protect against a user pressing Control-C when IPython is\n # idle and this is running. 
We trap KeyboardInterrupt and pass.\n try:\n t = clock()\n while not stdin_ready():\n pyglet.clock.tick()\n for window in pyglet.app.windows:\n window.switch_to()\n window.dispatch_events()\n window.dispatch_event("on_draw")\n flip(window)\n\n # We need to sleep at this point to keep the idle CPU load\n # low. However, if sleep to long, GUI response is poor. As\n # a compromise, we watch how often GUI events are being processed\n # and switch between a short and long sleep time. Here are some\n # stats useful in helping to tune this.\n # time CPU load\n # 0.001 13%\n # 0.005 3%\n # 0.01 1.5%\n # 0.05 0.5%\n used_time = clock() - t\n if used_time > 10.0:\n # print 'Sleep for 1 s' # dbg\n time.sleep(1.0)\n elif used_time > 0.1:\n # Few GUI events coming in, so we can sleep longer\n # print 'Sleep for 0.05 s' # dbg\n time.sleep(0.05)\n else:\n # Many GUI events coming in, so sleep only very little\n time.sleep(0.001)\n except KeyboardInterrupt:\n pass\n return 0\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhookpyglet.py
|
inputhookpyglet.py
|
Python
| 3,359 | 0.95 | 0.147368 | 0.425 |
awesome-app
| 499 |
2024-12-14T21:14:15.492735
|
Apache-2.0
| false |
7b98fa0c15fffdc8491c9c1fe4ec40b4
|
# encoding: utf-8\n# Unlike what IPython does, we need to have an explicit inputhook because tkinter handles\n# input hook in the C Source code\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nfrom pydev_ipython.inputhook import stdin_ready\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\nTCL_DONT_WAIT = 1 << 1\n\n\ndef create_inputhook_tk(app):\n def inputhook_tk():\n while app.dooneevent(TCL_DONT_WAIT) == 1:\n if stdin_ready():\n break\n return 0\n\n return inputhook_tk\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhooktk.py
|
inputhooktk.py
|
Python
| 779 | 0.95 | 0.16 | 0.5 |
react-lib
| 439 |
2025-03-18T04:43:16.832074
|
GPL-3.0
| false |
e1a581d5db31b6739381bf41c30f385d
|
# encoding: utf-8\n"""\nEnable wxPython to be used interacive by setting PyOS_InputHook.\n\nAuthors: Robin Dunn, Brian Granger, Ondrej Certik\n"""\n\n# -----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n# -----------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nimport sys\nimport signal\nfrom _pydev_bundle._pydev_saved_modules import time\nfrom timeit import default_timer as clock\nimport wx\n\nfrom pydev_ipython.inputhook import stdin_ready\n\n\n# -----------------------------------------------------------------------------\n# Code\n# -----------------------------------------------------------------------------\n\n\ndef inputhook_wx1():\n """Run the wx event loop by processing pending events only.\n\n This approach seems to work, but its performance is not great as it\n relies on having PyOS_InputHook called regularly.\n """\n try:\n app = wx.GetApp() # @UndefinedVariable\n if app is not None:\n assert wx.Thread_IsMain() # @UndefinedVariable\n\n # Make a temporary event loop and process system events until\n # there are no more waiting, then allow idle events (which\n # will also deal with pending or posted wx events.)\n evtloop = wx.EventLoop() # @UndefinedVariable\n ea = wx.EventLoopActivator(evtloop) # @UndefinedVariable\n while evtloop.Pending():\n evtloop.Dispatch()\n app.ProcessIdle()\n del ea\n except KeyboardInterrupt:\n pass\n return 0\n\n\nclass EventLoopTimer(wx.Timer): # @UndefinedVariable\n def __init__(self, func):\n self.func = func\n wx.Timer.__init__(self) # @UndefinedVariable\n\n def Notify(self):\n self.func()\n\n\nclass 
EventLoopRunner(object):\n def Run(self, time):\n self.evtloop = wx.EventLoop() # @UndefinedVariable\n self.timer = EventLoopTimer(self.check_stdin)\n self.timer.Start(time)\n self.evtloop.Run()\n\n def check_stdin(self):\n if stdin_ready():\n self.timer.Stop()\n self.evtloop.Exit()\n\n\ndef inputhook_wx2():\n """Run the wx event loop, polling for stdin.\n\n This version runs the wx eventloop for an undetermined amount of time,\n during which it periodically checks to see if anything is ready on\n stdin. If anything is ready on stdin, the event loop exits.\n\n The argument to elr.Run controls how often the event loop looks at stdin.\n This determines the responsiveness at the keyboard. A setting of 1000\n enables a user to type at most 1 char per second. I have found that a\n setting of 10 gives good keyboard response. We can shorten it further,\n but eventually performance would suffer from calling select/kbhit too\n often.\n """\n try:\n app = wx.GetApp() # @UndefinedVariable\n if app is not None:\n assert wx.Thread_IsMain() # @UndefinedVariable\n elr = EventLoopRunner()\n # As this time is made shorter, keyboard response improves, but idle\n # CPU load goes up. 10 ms seems like a good compromise.\n elr.Run(time=10) # CHANGE time here to control polling interval\n except KeyboardInterrupt:\n pass\n return 0\n\n\ndef inputhook_wx3():\n """Run the wx event loop by processing pending events only.\n\n This is like inputhook_wx1, but it keeps processing pending events\n until stdin is ready. After processing all pending events, a call to\n time.sleep is inserted. This is needed, otherwise, CPU usage is at 100%.\n This sleep time should be tuned though for best performance.\n """\n # We need to protect against a user pressing Control-C when IPython is\n # idle and this is running. 
We trap KeyboardInterrupt and pass.\n try:\n app = wx.GetApp() # @UndefinedVariable\n if app is not None:\n if hasattr(wx, "IsMainThread"):\n assert wx.IsMainThread() # @UndefinedVariable\n else:\n assert wx.Thread_IsMain() # @UndefinedVariable\n\n # The import of wx on Linux sets the handler for signal.SIGINT\n # to 0. This is a bug in wx or gtk. We fix by just setting it\n # back to the Python default.\n if not callable(signal.getsignal(signal.SIGINT)):\n signal.signal(signal.SIGINT, signal.default_int_handler)\n\n evtloop = wx.EventLoop() # @UndefinedVariable\n ea = wx.EventLoopActivator(evtloop) # @UndefinedVariable\n t = clock()\n while not stdin_ready():\n while evtloop.Pending():\n t = clock()\n evtloop.Dispatch()\n app.ProcessIdle()\n # We need to sleep at this point to keep the idle CPU load\n # low. However, if sleep to long, GUI response is poor. As\n # a compromise, we watch how often GUI events are being processed\n # and switch between a short and long sleep time. Here are some\n # stats useful in helping to tune this.\n # time CPU load\n # 0.001 13%\n # 0.005 3%\n # 0.01 1.5%\n # 0.05 0.5%\n used_time = clock() - t\n if used_time > 10.0:\n # print 'Sleep for 1 s' # dbg\n time.sleep(1.0)\n elif used_time > 0.1:\n # Few GUI events coming in, so we can sleep longer\n # print 'Sleep for 0.05 s' # dbg\n time.sleep(0.05)\n else:\n # Many GUI events coming in, so sleep only very little\n time.sleep(0.001)\n del ea\n except KeyboardInterrupt:\n pass\n return 0\n\n\nif sys.platform == "darwin":\n # On OSX, evtloop.Pending() always returns True, regardless of there being\n # any events pending. As such we can't use implementations 1 or 3 of the\n # inputhook as those depend on a pending/dispatch loop.\n inputhook_wx = inputhook_wx2\nelse:\n # This is our default implementation\n inputhook_wx = inputhook_wx3\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\inputhookwx.py
|
inputhookwx.py
|
Python
| 6,700 | 0.95 | 0.184971 | 0.282759 |
python-kit
| 37 |
2025-01-08T04:11:52.233376
|
GPL-3.0
| false |
6f03b168121f43d6d821ac22803d5458
|
import sys\nfrom _pydev_bundle import pydev_log\n\nbackends = {\n "tk": "TkAgg",\n "gtk": "GTKAgg",\n "wx": "WXAgg",\n "qt": "QtAgg", # Auto-choose qt4/5\n "qt4": "Qt4Agg",\n "qt5": "Qt5Agg",\n "qt6": "Qt6Agg",\n "osx": "MacOSX",\n}\n\nlowercase_convert = {\n "tkagg": "TkAgg",\n "gtkagg": "GTKAgg",\n "wxagg": "WXAgg",\n "qtagg": "QtAgg",\n "qt4agg": "Qt4Agg",\n "qt5agg": "Qt5Agg",\n "qt6agg": "Qt6Agg",\n "macosx": "MacOSX",\n "gtk": "GTK",\n "gtkcairo": "GTKCairo",\n "wx": "WX",\n "cocoaagg": "CocoaAgg",\n}\n\n# We also need a reverse backends2guis mapping that will properly choose which\n# GUI support to activate based on the desired matplotlib backend. For the\n# most part it's just a reverse of the above dict, but we also need to add a\n# few others that map to the same GUI manually:\nbackend2gui = dict(zip(backends.values(), backends.keys()))\n# In the reverse mapping, there are a few extra valid matplotlib backends that\n# map to the same GUI support\nbackend2gui["GTK"] = backend2gui["GTKCairo"] = "gtk"\nbackend2gui["WX"] = "wx"\nbackend2gui["CocoaAgg"] = "osx"\n\n\ndef do_enable_gui(guiname):\n from _pydev_bundle.pydev_versioncheck import versionok_for_gui\n\n if versionok_for_gui():\n try:\n from pydev_ipython.inputhook import enable_gui\n\n enable_gui(guiname)\n except:\n sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)\n pydev_log.exception()\n elif guiname not in ["none", "", None]:\n # Only print a warning if the guiname was going to do something\n sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)\n # Return value does not matter, so return back what was sent\n return guiname\n\n\ndef find_gui_and_backend():\n """Return the gui and mpl backend."""\n matplotlib = sys.modules["matplotlib"]\n # WARNING: this assumes matplotlib 1.1 or newer!!\n backend = matplotlib.rcParams["backend"]\n\n # Translate to the real case as in 3.9 the case was forced to 
lowercase\n # but our internal mapping is in the original case.\n realcase_backend = lowercase_convert.get(backend, backend)\n\n # In this case, we need to find what the appropriate gui selection call\n # should be for IPython, so we can activate inputhook accordingly\n gui = backend2gui.get(realcase_backend, None)\n return gui, backend\n\n\ndef _get_major_version(module):\n return int(module.__version__.split('.')[0])\n\n\ndef _get_minor_version(module):\n return int(module.__version__.split('.')[1])\n\n\ndef is_interactive_backend(backend):\n """Check if backend is interactive"""\n matplotlib = sys.modules["matplotlib"]\n new_api_version = (3, 9)\n installed_version = (\n _get_major_version(matplotlib),\n _get_minor_version(matplotlib)\n )\n\n if installed_version >= new_api_version:\n interactive_bk = matplotlib.backends.backend_registry.list_builtin(\n matplotlib.backends.BackendFilter.INTERACTIVE)\n non_interactive_bk = matplotlib.backends.backend_registry.list_builtin(\n matplotlib.backends.BackendFilter.NON_INTERACTIVE)\n else:\n from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport\n\n if backend in interactive_bk:\n return True\n elif backend in non_interactive_bk:\n return False\n else:\n return matplotlib.is_interactive()\n\n\ndef patch_use(enable_gui_function):\n """Patch matplotlib function 'use'"""\n matplotlib = sys.modules["matplotlib"]\n\n def patched_use(*args, **kwargs):\n matplotlib.real_use(*args, **kwargs)\n gui, backend = find_gui_and_backend()\n enable_gui_function(gui)\n\n matplotlib.real_use = matplotlib.use\n matplotlib.use = patched_use\n\n\ndef patch_is_interactive():\n """Patch matplotlib function 'use'"""\n matplotlib = sys.modules["matplotlib"]\n\n def patched_is_interactive():\n return matplotlib.rcParams["interactive"]\n\n matplotlib.real_is_interactive = matplotlib.is_interactive\n matplotlib.is_interactive = patched_is_interactive\n\n\ndef activate_matplotlib(enable_gui_function):\n """Set 
interactive to True for interactive backends.\n enable_gui_function - Function which enables gui, should be run in the main thread.\n """\n matplotlib = sys.modules["matplotlib"]\n gui, backend = find_gui_and_backend()\n is_interactive = is_interactive_backend(backend)\n if is_interactive:\n enable_gui_function(gui)\n if not matplotlib.is_interactive():\n sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)\n matplotlib.interactive(True)\n else:\n if matplotlib.is_interactive():\n sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)\n matplotlib.interactive(False)\n patch_use(enable_gui_function)\n patch_is_interactive()\n\n\ndef flag_calls(func):\n """Wrap a function to detect and flag when it gets called.\n\n This is a decorator which takes a function and wraps it in a function with\n a 'called' attribute. wrapper.called is initialized to False.\n\n The wrapper.called attribute is set to False right before each call to the\n wrapped function, so if the call fails it remains False. 
After the call\n completes, wrapper.called is set to True and the output is returned.\n\n Testing for truth in wrapper.called allows you to determine if a call to\n func() was attempted and succeeded."""\n\n # don't wrap twice\n if hasattr(func, "called"):\n return func\n\n def wrapper(*args, **kw):\n wrapper.called = False\n out = func(*args, **kw)\n wrapper.called = True\n return out\n\n wrapper.called = False\n wrapper.__doc__ = func.__doc__\n return wrapper\n\n\ndef activate_pylab():\n pylab = sys.modules["pylab"]\n pylab.show._needmain = False\n # We need to detect at runtime whether show() is called by the user.\n # For this, we wrap it into a decorator which adds a 'called' flag.\n pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)\n\n\ndef activate_pyplot():\n pyplot = sys.modules["matplotlib.pyplot"]\n pyplot.show._needmain = False\n # We need to detect at runtime whether show() is called by the user.\n # For this, we wrap it into a decorator which adds a 'called' flag.\n pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\matplotlibtools.py
|
matplotlibtools.py
|
Python
| 6,700 | 0.95 | 0.190722 | 0.117647 |
vue-tools
| 48 |
2024-04-07T17:44:22.519574
|
GPL-3.0
| false |
e751f7e11ebb9ce1b44ebba05ba497c4
|
"""A Qt API selector that can be used to switch between PyQt and PySide.\n\nThis uses the ETS 4.0 selection pattern of:\nPySide first, PyQt with API v2. second.\n\nDo not use this if you need PyQt with the old QString/QVariant API.\n"""\n\nimport os\n\nfrom pydev_ipython.qt_loaders import load_qt, QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQT6\n\nQT_API = os.environ.get("QT_API", None)\nif QT_API not in [QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQT6, None]:\n raise RuntimeError(\n "Invalid Qt API %r, valid values are: %r, %r, %r, %r, %r"\n % (QT_API, QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQT6)\n )\nif QT_API is None:\n api_opts = [QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQT6]\nelse:\n api_opts = [QT_API]\n\nQtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\qt.py
|
qt.py
|
Python
| 896 | 0.85 | 0.166667 | 0 |
vue-tools
| 519 |
2025-04-14T16:36:35.112396
|
BSD-3-Clause
| false |
d4196094100a3bec551d5784c6b517ce
|
"""Import Qt in a manner suitable for an IPython kernel.\n\nThis is the import used for the `gui=qt` or `matplotlib=qt` initialization.\n\nImport Priority:\n\nif Qt4 has been imported anywhere else:\n use that\n\nif matplotlib has been imported and doesn't support v2 (<= 1.0.1):\n use PyQt4 @v1\n\nNext, ask ETS' QT_API env variable\n\nif QT_API not set:\n ask matplotlib via rcParams['backend.qt4']\n if it said PyQt:\n use PyQt4 @v1\n elif it said PySide:\n use PySide\n\n else: (matplotlib said nothing)\n # this is the default path - nobody told us anything\n try:\n PyQt @v1\n except:\n fallback on PySide\nelse:\n use PyQt @v2 or PySide, depending on QT_API\n because ETS doesn't work with PyQt @v1.\n\n"""\n\nimport os\nimport sys\n\nfrom pydev_ipython.version import check_version\nfrom pydev_ipython.qt_loaders import (\n load_qt,\n QT_API_PYSIDE,\n QT_API_PYSIDE2,\n QT_API_PYQT,\n QT_API_PYQT_DEFAULT,\n loaded_api,\n QT_API_PYQT5,\n QT_API_PYQT6,\n)\n\n\n# Constraints placed on an imported matplotlib\ndef matplotlib_options(mpl):\n if mpl is None:\n return\n\n # #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options should be replaced with latest from ipython\n # (i.e.: properly check backend to decide upon qt4/qt5).\n\n backend = mpl.rcParams.get("backend", None)\n if backend == "Qt4Agg":\n mpqt = mpl.rcParams.get("backend.qt4", None)\n if mpqt is None:\n return None\n if mpqt.lower() == "pyside":\n return [QT_API_PYSIDE]\n elif mpqt.lower() == "pyqt4":\n return [QT_API_PYQT_DEFAULT]\n elif mpqt.lower() == "pyqt4v2":\n return [QT_API_PYQT]\n raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" % mpqt)\n\n elif backend == "Qt5Agg":\n mpqt = mpl.rcParams.get("backend.qt5", None)\n if mpqt is None:\n return None\n if mpqt.lower() == "pyqt5":\n return [QT_API_PYQT5]\n raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" % mpqt)\n\n elif backend == "Qt6Agg":\n mpqt = mpl.rcParams.get("backend.qt6", None)\n if mpqt 
is None:\n return None\n if mpqt.lower() == "pyqt6":\n return [QT_API_PYQT6]\n raise ImportError("unhandled value for backend.qt6 from matplotlib: %r" % mpqt)\n\n # Fallback without checking backend (previous code)\n mpqt = mpl.rcParams.get("backend.qt4", None)\n if mpqt is None:\n mpqt = mpl.rcParams.get("backend.qt5", None)\n if mpqt is None:\n mpqt = mpl.rcParams.get("backend.qt6", None)\n\n if mpqt is None:\n return None\n if mpqt.lower() == "pyside":\n return [QT_API_PYSIDE]\n elif mpqt.lower() == "pyqt4":\n return [QT_API_PYQT_DEFAULT]\n elif mpqt.lower() == "pyqt5":\n return [QT_API_PYQT5]\n elif mpqt.lower() == "pyqt6":\n return [QT_API_PYQT6]\n raise ImportError("unhandled value for qt backend from matplotlib: %r" % mpqt)\n\n\ndef get_options():\n """Return a list of acceptable QT APIs, in decreasing order of\n preference\n """\n # already imported Qt somewhere. Use that\n loaded = loaded_api()\n if loaded is not None:\n return [loaded]\n\n mpl = sys.modules.get("matplotlib", None)\n\n if mpl is not None and not check_version(mpl.__version__, "1.0.2"):\n # 1.0.1 only supports PyQt4 v1\n return [QT_API_PYQT_DEFAULT]\n\n if os.environ.get("QT_API", None) is None:\n # no ETS variable. Ask mpl, then use either\n return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT5, QT_API_PYQT6]\n\n # ETS variable present. Will fallback to external.qt\n return None\n\n\napi_opts = get_options()\nif api_opts is not None:\n QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)\n\nelse: # use ETS variable\n from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\qt_for_kernel.py
|
qt_for_kernel.py
|
Python
| 4,089 | 0.95 | 0.214815 | 0.083333 |
node-utils
| 627 |
2024-04-18T19:39:34.004676
|
GPL-3.0
| false |
b4ec710b31e045cfa10cd09c2ed2aee1
|
"""\nThis module contains factory functions that attempt\nto return Qt submodules from the various python Qt bindings.\n\nIt also protects against double-importing Qt with different\nbindings, which is unstable and likely to crash\n\nThis is used primarily by qt and qt_for_kernel, and shouldn't\nbe accessed directly from the outside\n"""\n\nimport sys\nfrom functools import partial\n\nfrom pydev_ipython.version import check_version\n\n# Available APIs.\nQT_API_PYQT = "pyqt"\nQT_API_PYQTv1 = "pyqtv1"\nQT_API_PYQT_DEFAULT = "pyqtdefault" # don't set SIP explicitly\nQT_API_PYSIDE = "pyside"\nQT_API_PYSIDE2 = "pyside2"\nQT_API_PYSIDE6 = "pyside6"\nQT_API_PYQT5 = "pyqt5"\nQT_API_PYQT6 = "pyqt6"\n\n\nclass ImportDenier(object):\n """Import Hook that will guard against bad Qt imports\n once IPython commits to a specific binding\n """\n\n def __init__(self):\n self.__forbidden = set()\n\n def forbid(self, module_name):\n sys.modules.pop(module_name, None)\n self.__forbidden.add(module_name)\n\n def find_module(self, fullname, path=None):\n if path:\n return\n if fullname in self.__forbidden:\n return self\n\n def load_module(self, fullname):\n raise ImportError(\n """\n Importing %s disabled by IPython, which has\n already imported an Incompatible QT Binding: %s\n """\n % (fullname, loaded_api())\n )\n\n\nID = ImportDenier()\nsys.meta_path.append(ID)\n\n\ndef commit_api(api):\n """Commit to a particular API, and trigger ImportErrors on subsequent\n dangerous imports"""\n\n if api == QT_API_PYSIDE:\n ID.forbid("PyQt4")\n ID.forbid("PyQt5")\n ID.forbid("PyQt6")\n else:\n ID.forbid("PySide")\n ID.forbid("PySide2")\n ID.forbid("PySide6")\n\n\ndef loaded_api():\n """Return which API is loaded, if any\n\n If this returns anything besides None,\n importing any other Qt binding is unsafe.\n\n Returns\n -------\n None, 'pyside', 'pyside2', 'pyside6', 'pyqt5', 'pyqt6', or 'pyqtv1'\n """\n if "PyQt4.QtCore" in sys.modules:\n if qtapi_version() == 2:\n return QT_API_PYQT\n else:\n 
return QT_API_PYQTv1\n elif "PySide.QtCore" in sys.modules:\n return QT_API_PYSIDE\n elif "PySide2.QtCore" in sys.modules:\n return QT_API_PYSIDE2\n elif "PySide6.QtCore" in sys.modules:\n return QT_API_PYSIDE6\n elif "PyQt5.QtCore" in sys.modules:\n return QT_API_PYQT5\n elif "PyQt6.QtCore" in sys.modules:\n return QT_API_PYQT6\n return None\n\n\ndef has_binding(api):\n """Safely check for PyQt or PySide, without importing\n submodules\n\n Parameters\n ----------\n api : str [ 'pyqtv1' | 'pyqt' | 'pyside' | 'pyqtdefault']\n Which module to check for\n\n Returns\n -------\n True if the relevant module appears to be importable\n """\n # we can't import an incomplete pyside and pyqt4\n # this will cause a crash in sip (#1431)\n # check for complete presence before importing\n module_name = {\n QT_API_PYSIDE: "PySide",\n QT_API_PYSIDE2: "PySide2",\n QT_API_PYSIDE6: "PySide6",\n QT_API_PYQT: "PyQt4",\n QT_API_PYQTv1: "PyQt4",\n QT_API_PYQT_DEFAULT: "PyQt4",\n QT_API_PYQT5: "PyQt5",\n QT_API_PYQT6: "PyQt6",\n }\n module_name = module_name[api]\n\n import importlib\n\n try:\n # importing top level PyQt4/PySide module is ok...\n mod = __import__(module_name)\n # ...importing submodules is not\n\n for check in ("QtCore", "QtGui", "QtSvg"):\n if importlib.util.find_spec("%s.%s" % (module_name, check)) is None:\n return False\n\n # we can also safely check PySide version\n if api == QT_API_PYSIDE:\n return check_version(mod.__version__, "1.0.3")\n else:\n return True\n\n except ModuleNotFoundError:\n try:\n from importlib import machinery\n\n # importing top level PyQt4/PySide module is ok...\n mod = __import__(module_name)\n\n # ...importing submodules is not\n loader_details = (machinery.ExtensionFileLoader, machinery.EXTENSION_SUFFIXES)\n submod_finder = machinery.FileFinder(mod.__path__[0], loader_details)\n submod_check = (\n submod_finder.find_spec("QtCore") is not None\n and submod_finder.find_spec("QtGui") is not None\n and submod_finder.find_spec("QtSvg") is not 
None\n )\n\n # we can also safely check PySide version\n if api == QT_API_PYSIDE:\n return check_version(mod.__version__, '1.0.3') and submod_check\n else:\n return submod_check\n except:\n return False\n \n except ImportError:\n return False\n\n\ndef qtapi_version():\n """Return which QString API has been set, if any\n\n Returns\n -------\n The QString API version (1 or 2), or None if not set\n """\n try:\n import sip\n except ImportError:\n return\n try:\n return sip.getapi("QString")\n except ValueError:\n return\n\n\ndef can_import(api):\n """Safely query whether an API is importable, without importing it"""\n if not has_binding(api):\n return False\n\n current = loaded_api()\n if api == QT_API_PYQT_DEFAULT:\n return current in [QT_API_PYQT, QT_API_PYQTv1, QT_API_PYQT5, QT_API_PYQT6, None]\n else:\n return current in [api, None]\n\n\ndef import_pyqt4(version=2):\n """\n Import PyQt4\n\n Parameters\n ----------\n version : 1, 2, or None\n Which QString/QVariant API to use. Set to None to use the system\n default\n\n ImportErrors raised within this function are non-recoverable\n """\n # The new-style string API (version=2) automatically\n # converts QStrings to Unicode Python strings. 
Also, automatically unpacks\n # QVariants to their underlying objects.\n import sip\n\n if version is not None:\n sip.setapi("QString", version)\n sip.setapi("QVariant", version)\n\n from PyQt4 import QtGui, QtCore, QtSvg\n\n if not check_version(QtCore.PYQT_VERSION_STR, "4.7"):\n raise ImportError("IPython requires PyQt4 >= 4.7, found %s" % QtCore.PYQT_VERSION_STR)\n\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n QtCore.Slot = QtCore.pyqtSlot\n\n # query for the API version (in case version == None)\n version = sip.getapi("QString")\n api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT\n return QtCore, QtGui, QtSvg, api\n\n\ndef import_pyqt5():\n """\n Import PyQt5\n\n ImportErrors raised within this function are non-recoverable\n """\n from PyQt5 import QtGui, QtCore, QtSvg\n\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n QtCore.Slot = QtCore.pyqtSlot\n\n return QtCore, QtGui, QtSvg, QT_API_PYQT5\n\n\ndef import_pyqt6():\n """\n Import PyQt6\n\n ImportErrors raised within this function are non-recoverable\n """\n from PyQt6 import QtGui, QtCore, QtSvg\n\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n QtCore.Slot = QtCore.pyqtSlot\n\n return QtCore, QtGui, QtSvg, QT_API_PYQT6\n\n\ndef import_pyside():\n """\n Import PySide\n\n ImportErrors raised within this function are non-recoverable\n """\n from PySide import QtGui, QtCore, QtSvg # @UnresolvedImport\n\n return QtCore, QtGui, QtSvg, QT_API_PYSIDE\n\n\ndef import_pyside2():\n """\n Import PySide2\n\n ImportErrors raised within this function are non-recoverable\n """\n from PySide2 import QtGui, QtCore, QtSvg # @UnresolvedImport\n\n return QtCore, QtGui, QtSvg, QT_API_PYSIDE2\n\n\ndef import_pyside6():\n """\n Import PySide6\n\n ImportErrors raised within this function are non-recoverable\n """\n from PySide6 import QtGui, QtCore, QtSvg # @UnresolvedImport\n\n 
return QtCore, QtGui, QtSvg, QT_API_PYSIDE6\n\n\ndef load_qt(api_options):\n """\n Attempt to import Qt, given a preference list\n of permissible bindings\n\n It is safe to call this function multiple times.\n\n Parameters\n ----------\n api_options: List of strings\n The order of APIs to try. Valid items are 'pyside',\n 'pyqt', and 'pyqtv1'\n\n Returns\n -------\n\n A tuple of QtCore, QtGui, QtSvg, QT_API\n The first three are the Qt modules. The last is the\n string indicating which module was loaded.\n\n Raises\n ------\n ImportError, if it isn't possible to import any requested\n bindings (either becaues they aren't installed, or because\n an incompatible library has already been installed)\n """\n loaders = {\n QT_API_PYSIDE: import_pyside,\n QT_API_PYSIDE2: import_pyside2,\n QT_API_PYSIDE6: import_pyside6,\n QT_API_PYQT: import_pyqt4,\n QT_API_PYQTv1: partial(import_pyqt4, version=1),\n QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None),\n QT_API_PYQT5: import_pyqt5,\n QT_API_PYQT6: import_pyqt6,\n }\n\n for api in api_options:\n if api not in loaders:\n raise RuntimeError(\n "Invalid Qt API %r, valid values are: %r, %r, %r, %r, %r, %r, %r"\n % (api, QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQTv1, QT_API_PYQT_DEFAULT, QT_API_PYQT5, QT_API_PYQT6)\n )\n\n if not can_import(api):\n continue\n\n # cannot safely recover from an ImportError during this\n result = loaders[api]()\n api = result[-1] # changed if api = QT_API_PYQT_DEFAULT\n commit_api(api)\n return result\n else:\n raise ImportError(\n """\n Could not load requested Qt binding. 
Please ensure that\n PyQt4 >= 4.7 or PySide >= 1.0.3 is available,\n and only one is imported per session.\n\n Currently-imported Qt library: %r\n PyQt4 installed: %s\n PyQt5 installed: %s\n PyQt6 installed: %s\n PySide >= 1.0.3 installed: %s\n PySide2 installed: %s\n PySide6 installed: %s\n Tried to load: %r\n """\n % (\n loaded_api(),\n has_binding(QT_API_PYQT),\n has_binding(QT_API_PYQT5),\n has_binding(QT_API_PYQT6),\n has_binding(QT_API_PYSIDE),\n has_binding(QT_API_PYSIDE2),\n has_binding(QT_API_PYSIDE6),\n api_options,\n )\n )\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\qt_loaders.py
|
qt_loaders.py
|
Python
| 10,922 | 0.95 | 0.154856 | 0.06 |
awesome-app
| 602 |
2023-09-21T18:13:21.696345
|
Apache-2.0
| false |
77e57c7fecdf8b0def437b8c3b979234
|
# Parts of IPython, files from: https://github.com/ipython/ipython/tree/rel-1.0.0/IPython\n# The files in this package are extracted from IPython to aid the main loop integration\n# See tests_mainloop for some manually runable tests\n\n# What we are doing is reusing the "inputhook" functionality (i.e. what in IPython\n# ends up on PyOS_InputHook) and using it in the pydevconsole context.\n# Rather that having the callbacks called in PyOS_InputHook, we use a custom XML-RPC\n# Server (HookableXMLRPCServer) that calls the inputhook when idle\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\README
|
README
|
Other
| 546 | 0.95 | 0.125 | 1 |
python-kit
| 10 |
2024-02-17T05:39:51.374966
|
Apache-2.0
| false |
d72ad09b821e32c1ec98cae2ecf49c7a
|
"""\nUtility for version comparison\n"""\n\n\nclass _Version:\n def __init__(self, s):\n parts = s.split(".")\n version_parts = []\n for p in parts:\n try:\n version_parts.append(int(p))\n except ValueError:\n version_parts.append(p)\n\n self._version_parts = tuple(version_parts)\n\n def __ge__(self, v):\n this_parts = self._version_parts\n other_parts = v._version_parts\n\n while len(this_parts) < len(other_parts):\n this_parts = this_parts + (0,)\n\n return this_parts >= other_parts\n\n\ndef check_version(found_version, expected_min_or_eq_to_version):\n """check version string found_version >= expected_min_or_eq_to_version\n\n If dev/prerelease tags result in TypeError for string-number comparison,\n it is assumed that the dependency is satisfied.\n Users on dev branches are responsible for keeping their own packages up to date.\n """\n try:\n return _Version(found_version) >= _Version(expected_min_or_eq_to_version)\n except TypeError:\n return True\n\n\nif __name__ == "__main__":\n assert check_version("1.2.3", "1.2.3")\n assert check_version("1.2.4", "1.2.3")\n assert check_version("1.2", "1.2.bar")\n assert check_version("1.3", "1.2.bar")\n assert check_version("1.3", "1.2b")\n assert not check_version("1.2", "1.3")\n assert not check_version("1.2.0", "1.2.1")\n assert not check_version("1.2", "1.2.1")\n print("Ok, checks passed")\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\version.py
|
version.py
|
Python
| 1,546 | 0.85 | 0.24 | 0 |
react-lib
| 368 |
2024-05-06T03:55:35.334318
|
MIT
| false |
035b724d140e9a6201ec9be3369d4170
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhook.cpython-313.pyc
|
inputhook.cpython-313.pyc
|
Other
| 21,358 | 0.95 | 0.115756 | 0.003831 |
node-utils
| 898 |
2023-12-24T05:31:24.332232
|
BSD-3-Clause
| false |
db16cb35ee5afb7d6549485ee273af08
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookglut.cpython-313.pyc
|
inputhookglut.cpython-313.pyc
|
Other
| 3,592 | 0.95 | 0.018182 | 0 |
node-utils
| 707 |
2025-03-02T12:47:57.334325
|
MIT
| false |
4dbf063ece78749564716382c9285197
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookgtk.cpython-313.pyc
|
inputhookgtk.cpython-313.pyc
|
Other
| 1,041 | 0.7 | 0 | 0 |
awesome-app
| 363 |
2025-04-26T10:06:35.027493
|
BSD-3-Clause
| false |
beaf77074f67fcba21ab63b05d30627b
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookgtk3.cpython-313.pyc
|
inputhookgtk3.cpython-313.pyc
|
Other
| 1,055 | 0.7 | 0 | 0 |
react-lib
| 505 |
2024-09-24T16:55:17.104091
|
MIT
| false |
79ddd1f3ef84817392e48c4e9cd8fe08
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookpyglet.cpython-313.pyc
|
inputhookpyglet.cpython-313.pyc
|
Other
| 2,453 | 0.8 | 0.020833 | 0.04878 |
node-utils
| 814 |
2024-12-06T20:37:06.838232
|
MIT
| false |
d666e73b4eb2b6fb4efea511bf029b2f
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookqt4.cpython-313.pyc
|
inputhookqt4.cpython-313.pyc
|
Other
| 5,677 | 0.95 | 0.097826 | 0 |
node-utils
| 664 |
2024-05-10T15:36:10.834305
|
BSD-3-Clause
| false |
271b928eec63a9fad631f9571d0db5cb
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookqt5.cpython-313.pyc
|
inputhookqt5.cpython-313.pyc
|
Other
| 5,713 | 0.95 | 0.098901 | 0 |
node-utils
| 688 |
2023-09-04T03:15:29.757762
|
GPL-3.0
| false |
6832c7c5f6f24b52874481b10f462fc2
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookqt6.cpython-313.pyc
|
inputhookqt6.cpython-313.pyc
|
Other
| 5,773 | 0.95 | 0.098901 | 0 |
react-lib
| 940 |
2024-04-05T15:18:18.839831
|
MIT
| false |
c678ba98074a0eb0c61e5fa405796a47
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhooktk.cpython-313.pyc
|
inputhooktk.cpython-313.pyc
|
Other
| 814 | 0.7 | 0 | 0 |
awesome-app
| 302 |
2025-03-06T12:47:00.760926
|
GPL-3.0
| false |
6f15ef77c6037d30839e4a755af16084
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\inputhookwx.cpython-313.pyc
|
inputhookwx.cpython-313.pyc
|
Other
| 6,377 | 0.8 | 0.050633 | 0 |
python-kit
| 204 |
2023-09-18T17:32:27.542027
|
MIT
| false |
9839597f077b2ff6bc158e242bec1995
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\matplotlibtools.cpython-313.pyc
|
matplotlibtools.cpython-313.pyc
|
Other
| 7,615 | 0.95 | 0.169014 | 0 |
react-lib
| 106 |
2023-09-18T21:22:13.300029
|
Apache-2.0
| false |
c6b43a79fa0e38bbe71e1de1030b8d99
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\qt.cpython-313.pyc
|
qt.cpython-313.pyc
|
Other
| 1,084 | 0.7 | 0.133333 | 0 |
node-utils
| 437 |
2024-06-04T05:06:16.673834
|
BSD-3-Clause
| false |
5442f8781421aead7950eb2860ef532f
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\qt_for_kernel.cpython-313.pyc
|
qt_for_kernel.cpython-313.pyc
|
Other
| 4,358 | 0.95 | 0.134146 | 0.013889 |
vue-tools
| 43 |
2024-04-29T15:26:11.785385
|
GPL-3.0
| false |
6ea54c6d0fb1cd9852623b4352af3129
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\qt_loaders.cpython-313.pyc
|
qt_loaders.cpython-313.pyc
|
Other
| 11,759 | 0.95 | 0.085714 | 0 |
react-lib
| 675 |
2025-01-12T08:40:07.937396
|
Apache-2.0
| false |
c8529c7086ba82a9a5d0c63653e11bb7
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\version.cpython-313.pyc
|
version.cpython-313.pyc
|
Other
| 2,551 | 0.8 | 0.103448 | 0 |
vue-tools
| 64 |
2024-01-18T02:27:00.887971
|
Apache-2.0
| false |
2ff45a40e0a6e69480e270ec9a2dec92
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_ipython\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 213 | 0.7 | 0 | 0 |
react-lib
| 993 |
2024-02-13T08:33:58.994637
|
Apache-2.0
| false |
5d0dbc8100acb83f53ade923fc90d82d
|
(no __init__.py file)
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_sitecustomize\__not_in_default_pythonpath.txt
|
__not_in_default_pythonpath.txt
|
Other
| 21 | 0.5 | 0.1 | 0 |
awesome-app
| 624 |
2024-02-18T16:42:05.937204
|
MIT
| false |
bb83c307cc31bfa91b30fd0ceb1252b8
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\pydev_sitecustomize\__pycache__\sitecustomize.cpython-313.pyc
|
sitecustomize.cpython-313.pyc
|
Other
| 7,029 | 0.8 | 0.013333 | 0 |
vue-tools
| 932 |
2025-04-28T15:15:38.359822
|
Apache-2.0
| false |
cdd23797b86725135558b32557c7d0f0
|
# Defines which version of the PyDBAdditionalThreadInfo we'll use.\nfrom _pydevd_bundle.pydevd_constants import ENV_FALSE_LOWER_VALUES, USE_CYTHON_FLAG, ENV_TRUE_LOWER_VALUES\n\nif USE_CYTHON_FLAG in ENV_TRUE_LOWER_VALUES:\n # We must import the cython version if forcing cython\n from _pydevd_bundle.pydevd_cython_wrapper import (\n PyDBAdditionalThreadInfo,\n set_additional_thread_info,\n _set_additional_thread_info_lock, # @UnusedImport\n any_thread_stepping,\n remove_additional_info,\n ) # @UnusedImport\n\nelif USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES:\n # Use the regular version if not forcing cython\n from _pydevd_bundle.pydevd_additional_thread_info_regular import (\n PyDBAdditionalThreadInfo,\n set_additional_thread_info,\n _set_additional_thread_info_lock, # @UnusedImport @Reimport\n any_thread_stepping,\n remove_additional_info,\n ) # @UnusedImport @Reimport\n\nelse:\n # Regular: use fallback if not found (message is already given elsewhere).\n try:\n from _pydevd_bundle.pydevd_cython_wrapper import (\n PyDBAdditionalThreadInfo,\n set_additional_thread_info,\n _set_additional_thread_info_lock,\n any_thread_stepping,\n remove_additional_info,\n )\n except ImportError:\n from _pydevd_bundle.pydevd_additional_thread_info_regular import (\n PyDBAdditionalThreadInfo,\n set_additional_thread_info,\n _set_additional_thread_info_lock, # @UnusedImport\n any_thread_stepping,\n remove_additional_info,\n ) # @UnusedImport\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_additional_thread_info.py
|
pydevd_additional_thread_info.py
|
Python
| 1,685 | 0.95 | 0.121951 | 0.105263 |
vue-tools
| 960 |
2023-10-04T12:04:32.416181
|
BSD-3-Clause
| false |
a6cd97485daa38c7ed4c0a68d734cb87
|
from _pydevd_bundle.pydevd_constants import (\n STATE_RUN,\n PYTHON_SUSPEND,\n SUPPORT_GEVENT,\n ForkSafeLock,\n _current_frames,\n STATE_SUSPEND,\n get_global_debugger,\n get_thread_id,\n)\nfrom _pydev_bundle import pydev_log\nfrom _pydev_bundle._pydev_saved_modules import threading\nfrom _pydev_bundle.pydev_is_thread_alive import is_thread_alive\nimport weakref\n\nversion = 11\n\n\n# =======================================================================================================================\n# PyDBAdditionalThreadInfo\n# =======================================================================================================================\n# fmt: off\n# IFDEF CYTHON\n# cdef class PyDBAdditionalThreadInfo:\n# ELSE\nclass PyDBAdditionalThreadInfo(object):\n# ENDIF\n# fmt: on\n\n # Note: the params in cython are declared in pydevd_cython.pxd.\n # fmt: off\n # IFDEF CYTHON\n # ELSE\n __slots__ = [\n "pydev_state",\n "pydev_step_stop",\n "pydev_original_step_cmd",\n "pydev_step_cmd",\n "pydev_notify_kill",\n "pydev_django_resolve_frame",\n "pydev_call_from_jinja2",\n "pydev_call_inside_jinja2",\n "is_tracing",\n "conditional_breakpoint_exception",\n "pydev_message",\n "suspend_type",\n "pydev_next_line",\n "pydev_func_name",\n "suspended_at_unhandled",\n "trace_suspend_type",\n "top_level_thread_tracer_no_back_frames",\n "top_level_thread_tracer_unhandled",\n "thread_tracer",\n "step_in_initial_location",\n # Used for CMD_SMART_STEP_INTO (to know which smart step into variant to use)\n "pydev_smart_parent_offset",\n "pydev_smart_child_offset",\n # Used for CMD_SMART_STEP_INTO (list[_pydevd_bundle.pydevd_bytecode_utils.Variant])\n # Filled when the cmd_get_smart_step_into_variants is requested (so, this is a copy\n # of the last request for a given thread and pydev_smart_parent_offset/pydev_smart_child_offset relies on it).\n "pydev_smart_step_into_variants",\n "target_id_to_smart_step_into_variant",\n "pydev_use_scoped_step_frame",\n "weak_thread",\n 
"is_in_wait_loop",\n ]\n # ENDIF\n # fmt: on\n\n def __init__(self):\n self.pydev_state = STATE_RUN # STATE_RUN or STATE_SUSPEND\n self.pydev_step_stop = None\n\n # Note: we have `pydev_original_step_cmd` and `pydev_step_cmd` because the original is to\n # say the action that started it and the other is to say what's the current tracing behavior\n # (because it's possible that we start with a step over but may have to switch to a\n # different step strategy -- for instance, if a step over is done and we return the current\n # method the strategy is changed to a step in).\n\n self.pydev_original_step_cmd = -1 # Something as CMD_STEP_INTO, CMD_STEP_OVER, etc.\n self.pydev_step_cmd = -1 # Something as CMD_STEP_INTO, CMD_STEP_OVER, etc.\n\n self.pydev_notify_kill = False\n self.pydev_django_resolve_frame = False\n self.pydev_call_from_jinja2 = None\n self.pydev_call_inside_jinja2 = None\n self.is_tracing = 0\n self.conditional_breakpoint_exception = None\n self.pydev_message = ""\n self.suspend_type = PYTHON_SUSPEND\n self.pydev_next_line = -1\n self.pydev_func_name = ".invalid." 
# Must match the type in cython\n self.suspended_at_unhandled = False\n self.trace_suspend_type = "trace" # 'trace' or 'frame_eval'\n self.top_level_thread_tracer_no_back_frames = []\n self.top_level_thread_tracer_unhandled = None\n self.thread_tracer = None\n self.step_in_initial_location = None\n self.pydev_smart_parent_offset = -1\n self.pydev_smart_child_offset = -1\n self.pydev_smart_step_into_variants = ()\n self.target_id_to_smart_step_into_variant = {}\n\n # Flag to indicate ipython use-case where each line will be executed as a call/line/return\n # in a new new frame but in practice we want to consider each new frame as if it was all\n # part of the same frame.\n #\n # In practice this means that a step over shouldn't revert to a step in and we need some\n # special logic to know when we should stop in a step over as we need to consider 2\n # different frames as being equal if they're logically the continuation of a frame\n # being executed by ipython line by line.\n #\n # See: https://github.com/microsoft/debugpy/issues/869#issuecomment-1132141003\n self.pydev_use_scoped_step_frame = False\n self.weak_thread = None\n\n # Purpose: detect if this thread is suspended and actually in the wait loop\n # at this time (otherwise it may be suspended but still didn't reach a point.\n # to pause).\n self.is_in_wait_loop = False\n\n # fmt: off\n # IFDEF CYTHON\n # cpdef object _get_related_thread(self):\n # ELSE\n def _get_related_thread(self):\n # ENDIF\n # fmt: on\n if self.pydev_notify_kill: # Already killed\n return None\n\n if self.weak_thread is None:\n return None\n\n thread = self.weak_thread()\n if thread is None:\n return False\n\n if not is_thread_alive(thread):\n return None\n\n if thread._ident is None: # Can this happen?\n pydev_log.critical("thread._ident is None in _get_related_thread!")\n return None\n\n if threading._active.get(thread._ident) is not thread:\n return None\n\n return thread\n\n # fmt: off\n # IFDEF CYTHON\n # cpdef bint 
_is_stepping(self):\n # ELSE\n def _is_stepping(self):\n # ENDIF\n # fmt: on\n if self.pydev_state == STATE_RUN and self.pydev_step_cmd != -1:\n # This means actually stepping in a step operation.\n return True\n\n if self.pydev_state == STATE_SUSPEND and self.is_in_wait_loop:\n # This means stepping because it was suspended but still didn't\n # reach a suspension point.\n return True\n\n return False\n\n # fmt: off\n # IFDEF CYTHON\n # cpdef get_topmost_frame(self, thread):\n # ELSE\n def get_topmost_frame(self, thread):\n # ENDIF\n # fmt: on\n """\n Gets the topmost frame for the given thread. Note that it may be None\n and callers should remove the reference to the frame as soon as possible\n to avoid disturbing user code.\n """\n # sys._current_frames(): dictionary with thread id -> topmost frame\n current_frames = _current_frames()\n topmost_frame = current_frames.get(thread._ident)\n if topmost_frame is None:\n # Note: this is expected for dummy threads (so, getting the topmost frame should be\n # treated as optional).\n pydev_log.info(\n "Unable to get topmost frame for thread: %s, thread.ident: %s, id(thread): %s\nCurrent frames: %s.\n" "GEVENT_SUPPORT: %s",\n thread,\n thread.ident,\n id(thread),\n current_frames,\n SUPPORT_GEVENT,\n )\n\n return topmost_frame\n\n # fmt: off\n # IFDEF CYTHON\n # cpdef update_stepping_info(self):\n # ELSE\n def update_stepping_info(self):\n # ENDIF\n # fmt: on\n _update_stepping_info(self)\n\n def __str__(self):\n return "State:%s Stop:%s Cmd: %s Kill:%s" % (self.pydev_state, self.pydev_step_stop, self.pydev_step_cmd, self.pydev_notify_kill)\n\n\n_set_additional_thread_info_lock = ForkSafeLock()\n_next_additional_info = [PyDBAdditionalThreadInfo()]\n\n\n# fmt: off\n# IFDEF CYTHON\n# cpdef set_additional_thread_info(thread):\n# ELSE\ndef set_additional_thread_info(thread):\n# ENDIF\n# fmt: on\n try:\n additional_info = thread.additional_info\n if additional_info is None:\n raise AttributeError()\n except:\n with 
_set_additional_thread_info_lock:\n # If it's not there, set it within a lock to avoid any racing\n # conditions.\n try:\n additional_info = thread.additional_info\n except:\n additional_info = None\n\n if additional_info is None:\n # Note: don't call PyDBAdditionalThreadInfo constructor at this\n # point as it can piggy-back into the debugger which could\n # get here again, rather get the global ref which was pre-created\n # and add a new entry only after we set thread.additional_info.\n additional_info = _next_additional_info[0]\n thread.additional_info = additional_info\n additional_info.weak_thread = weakref.ref(thread)\n add_additional_info(additional_info)\n del _next_additional_info[:]\n _next_additional_info.append(PyDBAdditionalThreadInfo())\n\n return additional_info\n\n\n# fmt: off\n# IFDEF CYTHON\n# cdef set _all_infos\n# cdef set _infos_stepping\n# cdef object _update_infos_lock\n# ELSE\n# ENDIF\n# fmt: on\n\n_all_infos = set()\n_infos_stepping = set()\n_update_infos_lock = ForkSafeLock()\n\n\n# fmt: off\n# IFDEF CYTHON\n# cdef _update_stepping_info(PyDBAdditionalThreadInfo info):\n# ELSE\ndef _update_stepping_info(info):\n# ENDIF\n# fmt: on\n\n global _infos_stepping\n global _all_infos\n\n with _update_infos_lock:\n # Removes entries that are no longer valid.\n new_all_infos = set()\n for info in _all_infos:\n if info._get_related_thread() is not None:\n new_all_infos.add(info)\n _all_infos = new_all_infos\n\n new_stepping = set()\n for info in _all_infos:\n if info._is_stepping():\n new_stepping.add(info)\n _infos_stepping = new_stepping\n\n py_db = get_global_debugger()\n if py_db is not None and not py_db.pydb_disposed:\n thread = info.weak_thread()\n if thread is not None:\n thread_id = get_thread_id(thread)\n _queue, event = py_db.get_internal_queue_and_event(thread_id)\n event.set()\n\n# fmt: off\n# IFDEF CYTHON\n# cpdef add_additional_info(PyDBAdditionalThreadInfo info):\n# ELSE\ndef add_additional_info(info):\n# ENDIF\n# fmt: on\n with 
_update_infos_lock:\n _all_infos.add(info)\n if info._is_stepping():\n _infos_stepping.add(info)\n\n# fmt: off\n# IFDEF CYTHON\n# cpdef remove_additional_info(PyDBAdditionalThreadInfo info):\n# ELSE\ndef remove_additional_info(info):\n# ENDIF\n# fmt: on\n with _update_infos_lock:\n _all_infos.discard(info)\n _infos_stepping.discard(info)\n\n\n# fmt: off\n# IFDEF CYTHON\n# cpdef bint any_thread_stepping():\n# ELSE\ndef any_thread_stepping():\n# ENDIF\n# fmt: on\n return bool(_infos_stepping)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_additional_thread_info_regular.py
|
pydevd_additional_thread_info_regular.py
|
Python
| 11,139 | 0.95 | 0.137195 | 0.392982 |
vue-tools
| 356 |
2023-10-15T05:26:08.369022
|
BSD-3-Clause
| false |
eb5f2a6b59680ef78ac909cabacce2b3
|
from _pydevd_bundle.pydevd_constants import IS_PY311_OR_GREATER\nimport dis\nfrom types import CodeType\nfrom collections import namedtuple\n\nDEBUG = False\n\n_Pos = namedtuple("_Pos", "lineno endlineno startcol endcol")\n\n\ndef _is_inside(item_pos: _Pos, container_pos: _Pos):\n if item_pos.lineno < container_pos.lineno or item_pos.endlineno > container_pos.endlineno:\n return False\n\n if item_pos.lineno == container_pos.lineno:\n if item_pos.startcol < container_pos.startcol:\n return False\n\n if item_pos.endlineno == container_pos.endlineno:\n if item_pos.endcol > container_pos.endcol:\n return False\n\n # Not outside, must be inside.\n return True\n\n\ndef _get_smart_step_into_targets(code):\n import linecache\n from .pydevd_bytecode_utils import Target\n\n filename = code.co_filename\n\n targets_root = []\n children = []\n for instr in dis.Bytecode(code):\n if instr.opname == "LOAD_CONST":\n if isinstance(instr.argval, CodeType):\n children.append(_get_smart_step_into_targets(instr.argval))\n\n elif instr.opname in ("CALL", "CALL_INTRINSIC_1"):\n positions = instr.positions\n if positions.lineno is None:\n continue\n if positions.end_lineno is None:\n continue\n lines = []\n for lineno in range(positions.lineno, positions.end_lineno + 1):\n lines.append(linecache.getline(filename, lineno))\n\n startcol = positions.col_offset\n endcol = positions.end_col_offset\n\n if positions.lineno == positions.end_lineno:\n lines[0] = lines[0][startcol:endcol]\n else:\n lines[0] = lines[0][startcol:]\n lines[-1] = lines[-1][:endcol]\n\n pos = _Pos(positions.lineno, positions.end_lineno, startcol, endcol)\n targets_root.append(Target("".join(lines), positions.lineno, instr.offset, [], positions.end_lineno, startcol, endcol))\n\n for targets in children:\n for child_target in targets:\n pos = _Pos(child_target.lineno, child_target.endlineno, child_target.startcol, child_target.endcol)\n\n for outer_target in targets_root:\n outer_pos = _Pos(outer_target.lineno, 
outer_target.endlineno, outer_target.startcol, outer_target.endcol)\n if _is_inside(pos, outer_pos):\n outer_target.children_targets.append(child_target)\n break\n return targets_root\n\n\ndef calculate_smart_step_into_variants(frame, start_line, end_line, base=0):\n """\n Calculate smart step into variants for the given line range.\n :param frame:\n :type frame: :py:class:`types.FrameType`\n :param start_line:\n :param end_line:\n :return: A list of call names from the first to the last.\n :note: it's guaranteed that the offsets appear in order.\n :raise: :py:class:`RuntimeError` if failed to parse the bytecode or if dis cannot be used.\n """\n from .pydevd_bytecode_utils import _convert_target_to_variant\n\n variants = []\n code = frame.f_code\n lasti = frame.f_lasti\n\n call_order_cache = {}\n if DEBUG:\n print("dis.dis:")\n if IS_PY311_OR_GREATER:\n dis.dis(code, show_caches=False)\n else:\n dis.dis(code)\n\n for target in _get_smart_step_into_targets(code):\n variant = _convert_target_to_variant(target, start_line, end_line, call_order_cache, lasti, base)\n if variant is None:\n continue\n variants.append(variant)\n\n return variants\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_bytecode_utils_py311.py
|
pydevd_bytecode_utils_py311.py
|
Python
| 3,705 | 0.95 | 0.266667 | 0.012195 |
python-kit
| 906 |
2025-03-24T12:25:11.542740
|
Apache-2.0
| false |
8cf9fca8f9136a1e16990babc0eece15
|
"""\nDecompiler that can be used with the debugger (where statements correctly represent the\nline numbers).\n\nNote: this is a work in progress / proof of concept / not ready to be used.\n"""\n\nimport dis\n\nfrom _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions\nfrom _pydev_bundle import pydev_log\nimport sys\nimport inspect\nfrom io import StringIO\n\n\nclass _Stack(object):\n def __init__(self):\n self._contents = []\n\n def push(self, obj):\n # print('push', obj)\n self._contents.append(obj)\n\n def pop(self):\n return self._contents.pop(-1)\n\n\nINDENT_MARKER = object()\nDEDENT_MARKER = object()\n_SENTINEL = object()\n\nDEBUG = False\n\n\nclass _Token(object):\n def __init__(self, i_line, instruction=None, tok=_SENTINEL, priority=0, after=None, end_of_line=False):\n """\n :param i_line:\n :param instruction:\n :param tok:\n :param priority:\n :param after:\n :param end_of_line:\n Marker to signal only after all the other tokens have been written.\n """\n self.i_line = i_line\n if tok is not _SENTINEL:\n self.tok = tok\n else:\n if instruction is not None:\n if inspect.iscode(instruction.argval):\n self.tok = ""\n else:\n self.tok = str(instruction.argval)\n else:\n raise AssertionError("Either the tok or the instruction is needed.")\n self.instruction = instruction\n self.priority = priority\n self.end_of_line = end_of_line\n self._after_tokens = set()\n self._after_handler_tokens = set()\n if after:\n self.mark_after(after)\n\n def mark_after(self, v):\n if isinstance(v, _Token):\n self._after_tokens.add(v)\n elif isinstance(v, _BaseHandler):\n self._after_handler_tokens.add(v)\n\n else:\n raise AssertionError("Unhandled: %s" % (v,))\n\n def get_after_tokens(self):\n ret = self._after_tokens.copy()\n for handler in self._after_handler_tokens:\n ret.update(handler.tokens)\n return ret\n\n def __repr__(self):\n return "Token(%s, after: %s)" % (self.tok, self.get_after_tokens())\n\n __str__ = __repr__\n\n\nclass _Writer(object):\n def 
__init__(self):\n self.line_to_contents = {}\n self.all_tokens = set()\n\n def get_line(self, line):\n lst = self.line_to_contents.get(line)\n if lst is None:\n lst = self.line_to_contents[line] = []\n return lst\n\n def indent(self, line):\n self.get_line(line).append(INDENT_MARKER)\n\n def dedent(self, line):\n self.get_line(line).append(DEDENT_MARKER)\n\n def write(self, line, token):\n if token in self.all_tokens:\n return\n self.all_tokens.add(token)\n assert isinstance(token, _Token)\n lst = self.get_line(line)\n lst.append(token)\n\n\nclass _BaseHandler(object):\n def __init__(self, i_line, instruction, stack, writer, disassembler):\n self.i_line = i_line\n self.instruction = instruction\n self.stack = stack\n self.writer = writer\n self.disassembler = disassembler\n self.tokens = []\n self._handle()\n\n def _write_tokens(self):\n for token in self.tokens:\n self.writer.write(token.i_line, token)\n\n def _handle(self):\n raise NotImplementedError(self)\n\n def __repr__(self, *args, **kwargs):\n try:\n return "%s line:%s" % (self.instruction, self.i_line)\n except:\n return object.__repr__(self)\n\n __str__ = __repr__\n\n\n_op_name_to_handler = {}\n\n\ndef _register(cls):\n _op_name_to_handler[cls.opname] = cls\n return cls\n\n\nclass _BasePushHandler(_BaseHandler):\n def _handle(self):\n self.stack.push(self)\n\n\nclass _BaseLoadHandler(_BasePushHandler):\n def _handle(self):\n _BasePushHandler._handle(self)\n self.tokens = [_Token(self.i_line, self.instruction)]\n\n\n@_register\nclass _LoadBuildClass(_BasePushHandler):\n opname = "LOAD_BUILD_CLASS"\n\n\n@_register\nclass _LoadConst(_BaseLoadHandler):\n opname = "LOAD_CONST"\n\n\n@_register\nclass _LoadName(_BaseLoadHandler):\n opname = "LOAD_NAME"\n\n\n@_register\nclass _LoadGlobal(_BaseLoadHandler):\n opname = "LOAD_GLOBAL"\n\n\n@_register\nclass _LoadFast(_BaseLoadHandler):\n opname = "LOAD_FAST"\n\n\n@_register\nclass _GetIter(_BaseHandler):\n """\n Implements TOS = iter(TOS).\n """\n\n opname = 
"GET_ITER"\n iter_target = None\n\n def _handle(self):\n self.iter_target = self.stack.pop()\n self.tokens.extend(self.iter_target.tokens)\n self.stack.push(self)\n\n\n@_register\nclass _ForIter(_BaseHandler):\n """\n TOS is an iterator. Call its __next__() method. If this yields a new value, push it on the stack\n (leaving the iterator below it). If the iterator indicates it is exhausted TOS is popped, and\n the byte code counter is incremented by delta.\n """\n\n opname = "FOR_ITER"\n\n iter_in = None\n\n def _handle(self):\n self.iter_in = self.stack.pop()\n self.stack.push(self)\n\n def store_in_name(self, store_name):\n for_token = _Token(self.i_line, None, "for ")\n self.tokens.append(for_token)\n prev = for_token\n\n t_name = _Token(store_name.i_line, store_name.instruction, after=prev)\n self.tokens.append(t_name)\n prev = t_name\n\n in_token = _Token(store_name.i_line, None, " in ", after=prev)\n self.tokens.append(in_token)\n prev = in_token\n\n max_line = store_name.i_line\n if self.iter_in:\n for t in self.iter_in.tokens:\n t.mark_after(prev)\n max_line = max(max_line, t.i_line)\n prev = t\n self.tokens.extend(self.iter_in.tokens)\n\n colon_token = _Token(self.i_line, None, ":", after=prev)\n self.tokens.append(colon_token)\n prev = for_token\n\n self._write_tokens()\n\n\n@_register\nclass _StoreName(_BaseHandler):\n """\n Implements name = TOS. 
namei is the index of name in the attribute co_names of the code object.\n The compiler tries to use STORE_FAST or STORE_GLOBAL if possible.\n """\n\n opname = "STORE_NAME"\n\n def _handle(self):\n v = self.stack.pop()\n\n if isinstance(v, _ForIter):\n v.store_in_name(self)\n else:\n if not isinstance(v, _MakeFunction) or v.is_lambda:\n line = self.i_line\n for t in v.tokens:\n line = min(line, t.i_line)\n\n t_name = _Token(line, self.instruction)\n t_equal = _Token(line, None, "=", after=t_name)\n\n self.tokens.append(t_name)\n self.tokens.append(t_equal)\n\n for t in v.tokens:\n t.mark_after(t_equal)\n self.tokens.extend(v.tokens)\n\n self._write_tokens()\n\n\n@_register\nclass _ReturnValue(_BaseHandler):\n """\n Returns with TOS to the caller of the function.\n """\n\n opname = "RETURN_VALUE"\n\n def _handle(self):\n v = self.stack.pop()\n return_token = _Token(self.i_line, None, "return ", end_of_line=True)\n self.tokens.append(return_token)\n for token in v.tokens:\n token.mark_after(return_token)\n self.tokens.extend(v.tokens)\n\n self._write_tokens()\n\n\n@_register\nclass _CallFunction(_BaseHandler):\n """\n\n CALL_FUNCTION(argc)\n\n Calls a callable object with positional arguments. argc indicates the number of positional\n arguments. The top of the stack contains positional arguments, with the right-most argument\n on top. Below the arguments is a callable object to call. 
CALL_FUNCTION pops all arguments\n and the callable object off the stack, calls the callable object with those arguments, and\n pushes the return value returned by the callable object.\n\n Changed in version 3.6: This opcode is used only for calls with positional arguments.\n\n """\n\n opname = "CALL_FUNCTION"\n\n def _handle(self):\n args = []\n for _i in range(self.instruction.argval + 1):\n arg = self.stack.pop()\n args.append(arg)\n it = reversed(args)\n name = next(it)\n max_line = name.i_line\n for t in name.tokens:\n self.tokens.append(t)\n\n tok_open_parens = _Token(name.i_line, None, "(", after=name)\n self.tokens.append(tok_open_parens)\n\n prev = tok_open_parens\n for i, arg in enumerate(it):\n for t in arg.tokens:\n t.mark_after(name)\n t.mark_after(prev)\n max_line = max(max_line, t.i_line)\n self.tokens.append(t)\n prev = arg\n\n if i > 0:\n comma_token = _Token(prev.i_line, None, ",", after=prev)\n self.tokens.append(comma_token)\n prev = comma_token\n\n tok_close_parens = _Token(max_line, None, ")", after=prev)\n self.tokens.append(tok_close_parens)\n\n self._write_tokens()\n\n self.stack.push(self)\n\n\n@_register\nclass _MakeFunctionPy3(_BaseHandler):\n """\n Pushes a new function object on the stack. 
From bottom to top, the consumed stack must consist\n of values if the argument carries a specified flag value\n\n 0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order\n\n 0x02 a dictionary of keyword-only parameters' default values\n\n 0x04 an annotation dictionary\n\n 0x08 a tuple containing cells for free variables, making a closure\n\n the code associated with the function (at TOS1)\n\n the qualified name of the function (at TOS)\n """\n\n opname = "MAKE_FUNCTION"\n is_lambda = False\n\n def _handle(self):\n stack = self.stack\n self.qualified_name = stack.pop()\n self.code = stack.pop()\n\n default_node = None\n if self.instruction.argval & 0x01:\n default_node = stack.pop()\n\n is_lambda = self.is_lambda = "<lambda>" in [x.tok for x in self.qualified_name.tokens]\n\n if not is_lambda:\n def_token = _Token(self.i_line, None, "def ")\n self.tokens.append(def_token)\n\n for token in self.qualified_name.tokens:\n self.tokens.append(token)\n if not is_lambda:\n token.mark_after(def_token)\n prev = token\n\n open_parens_token = _Token(self.i_line, None, "(", after=prev)\n self.tokens.append(open_parens_token)\n prev = open_parens_token\n\n code = self.code.instruction.argval\n\n if default_node:\n defaults = ([_SENTINEL] * (len(code.co_varnames) - len(default_node.instruction.argval))) + list(\n default_node.instruction.argval\n )\n else:\n defaults = [_SENTINEL] * len(code.co_varnames)\n\n for i, arg in enumerate(code.co_varnames):\n if i > 0:\n comma_token = _Token(prev.i_line, None, ", ", after=prev)\n self.tokens.append(comma_token)\n prev = comma_token\n\n arg_token = _Token(self.i_line, None, arg, after=prev)\n self.tokens.append(arg_token)\n\n default = defaults[i]\n if default is not _SENTINEL:\n eq_token = _Token(default_node.i_line, None, "=", after=prev)\n self.tokens.append(eq_token)\n prev = eq_token\n\n default_token = _Token(default_node.i_line, None, str(default), after=prev)\n 
self.tokens.append(default_token)\n prev = default_token\n\n tok_close_parens = _Token(prev.i_line, None, "):", after=prev)\n self.tokens.append(tok_close_parens)\n\n self._write_tokens()\n\n stack.push(self)\n self.writer.indent(prev.i_line + 1)\n self.writer.dedent(max(self.disassembler.merge_code(code)))\n\n\n_MakeFunction = _MakeFunctionPy3\n\n\ndef _print_after_info(line_contents, stream=None):\n if stream is None:\n stream = sys.stdout\n for token in line_contents:\n after_tokens = token.get_after_tokens()\n if after_tokens:\n s = "%s after: %s\n" % (repr(token.tok), ('"' + '", "'.join(t.tok for t in token.get_after_tokens()) + '"'))\n stream.write(s)\n else:\n stream.write("%s (NO REQUISITES)" % repr(token.tok))\n\n\ndef _compose_line_contents(line_contents, previous_line_tokens):\n lst = []\n handled = set()\n\n add_to_end_of_line = []\n delete_indexes = []\n for i, token in enumerate(line_contents):\n if token.end_of_line:\n add_to_end_of_line.append(token)\n delete_indexes.append(i)\n for i in reversed(delete_indexes):\n del line_contents[i]\n del delete_indexes\n\n while line_contents:\n added = False\n delete_indexes = []\n\n for i, token in enumerate(line_contents):\n after_tokens = token.get_after_tokens()\n for after in after_tokens:\n if after not in handled and after not in previous_line_tokens:\n break\n else:\n added = True\n previous_line_tokens.add(token)\n handled.add(token)\n lst.append(token.tok)\n delete_indexes.append(i)\n\n for i in reversed(delete_indexes):\n del line_contents[i]\n\n if not added:\n if add_to_end_of_line:\n line_contents.extend(add_to_end_of_line)\n del add_to_end_of_line[:]\n continue\n\n # Something is off, let's just add as is.\n for token in line_contents:\n if token not in handled:\n lst.append(token.tok)\n\n stream = StringIO()\n _print_after_info(line_contents, stream)\n pydev_log.critical("Error. 
After markers are not correct:\n%s", stream.getvalue())\n break\n return "".join(lst)\n\n\nclass _PyCodeToSource(object):\n def __init__(self, co, memo=None):\n if memo is None:\n memo = {}\n self.memo = memo\n self.co = co\n self.instructions = list(iter_instructions(co))\n self.stack = _Stack()\n self.writer = _Writer()\n\n def _process_next(self, i_line):\n instruction = self.instructions.pop(0)\n handler_class = _op_name_to_handler.get(instruction.opname)\n if handler_class is not None:\n s = handler_class(i_line, instruction, self.stack, self.writer, self)\n if DEBUG:\n print(s)\n\n else:\n if DEBUG:\n print("UNHANDLED", instruction)\n\n def build_line_to_contents(self):\n co = self.co\n\n op_offset_to_line = dict(dis.findlinestarts(co))\n curr_line_index = 0\n\n instructions = self.instructions\n while instructions:\n instruction = instructions[0]\n new_line_index = op_offset_to_line.get(instruction.offset)\n if new_line_index is not None:\n curr_line_index = new_line_index\n\n self._process_next(curr_line_index)\n return self.writer.line_to_contents\n\n def merge_code(self, code):\n if DEBUG:\n print("merge code ----")\n # for d in dir(code):\n # if not d.startswith('_'):\n # print(d, getattr(code, d))\n line_to_contents = _PyCodeToSource(code, self.memo).build_line_to_contents()\n lines = []\n for line, contents in sorted(line_to_contents.items()):\n lines.append(line)\n self.writer.get_line(line).extend(contents)\n if DEBUG:\n print("end merge code ----")\n return lines\n\n def disassemble(self):\n show_lines = False\n line_to_contents = self.build_line_to_contents()\n stream = StringIO()\n last_line = 0\n indent = ""\n previous_line_tokens = set()\n for i_line, contents in sorted(line_to_contents.items()):\n while last_line < i_line - 1:\n if show_lines:\n stream.write("%s.\n" % (last_line + 1,))\n else:\n stream.write("\n")\n last_line += 1\n\n line_contents = []\n dedents_found = 0\n for part in contents:\n if part is INDENT_MARKER:\n if DEBUG:\n 
print("found indent", i_line)\n indent += " "\n continue\n if part is DEDENT_MARKER:\n if DEBUG:\n print("found dedent", i_line)\n dedents_found += 1\n continue\n line_contents.append(part)\n\n s = indent + _compose_line_contents(line_contents, previous_line_tokens)\n if show_lines:\n stream.write("%s. %s\n" % (i_line, s))\n else:\n stream.write("%s\n" % s)\n\n if dedents_found:\n indent = indent[: -(4 * dedents_found)]\n last_line = i_line\n\n return stream.getvalue()\n\n\ndef code_obj_to_source(co):\n """\n Converts a code object to source code to provide a suitable representation for the compiler when\n the actual source code is not found.\n\n This is a work in progress / proof of concept / not ready to be used.\n """\n ret = _PyCodeToSource(co).disassemble()\n if DEBUG:\n print(ret)\n return ret\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_code_to_source.py
|
pydevd_code_to_source.py
|
Python
| 18,165 | 0.95 | 0.219835 | 0.010846 |
node-utils
| 423 |
2024-04-10T12:36:13.328851
|
GPL-3.0
| false |
227a63796677c22a6f5db91c86d93d84
|
import dis\nimport inspect\nimport sys\nfrom collections import namedtuple\n\nfrom _pydev_bundle import pydev_log\nfrom opcode import EXTENDED_ARG, HAVE_ARGUMENT, cmp_op, hascompare, hasconst, hasfree, hasjrel, haslocal, hasname, opname\n\nfrom io import StringIO\n\n\nclass TryExceptInfo(object):\n def __init__(self, try_line, ignore=False):\n """\n :param try_line:\n :param ignore:\n Usually we should ignore any block that's not a try..except\n (this can happen for finally blocks, with statements, etc, for\n which we create temporary entries).\n """\n self.try_line = try_line\n self.ignore = ignore\n self.except_line = -1\n self.except_end_line = -1\n self.raise_lines_in_except = []\n\n # Note: these may not be available if generated from source instead of bytecode.\n self.except_bytecode_offset = -1\n self.except_end_bytecode_offset = -1\n\n def is_line_in_try_block(self, line):\n return self.try_line <= line < self.except_line\n\n def is_line_in_except_block(self, line):\n return self.except_line <= line <= self.except_end_line\n\n def __str__(self):\n lst = [\n "{try:",\n str(self.try_line),\n " except ",\n str(self.except_line),\n " end block ",\n str(self.except_end_line),\n ]\n if self.raise_lines_in_except:\n lst.append(" raises: %s" % (", ".join(str(x) for x in self.raise_lines_in_except),))\n\n lst.append("}")\n return "".join(lst)\n\n __repr__ = __str__\n\n\nclass ReturnInfo(object):\n def __init__(self, return_line):\n self.return_line = return_line\n\n def __str__(self):\n return "{return: %s}" % (self.return_line,)\n\n __repr__ = __str__\n\n\ndef _get_line(op_offset_to_line, op_offset, firstlineno, search=False):\n op_offset_original = op_offset\n while op_offset >= 0:\n ret = op_offset_to_line.get(op_offset)\n if ret is not None:\n return ret - firstlineno\n if not search:\n return ret\n else:\n op_offset -= 1\n raise AssertionError("Unable to find line for offset: %s.Info: %s" % (op_offset_original, op_offset_to_line))\n\n\ndef debug(s):\n 
pass\n\n\n_Instruction = namedtuple("_Instruction", "opname, opcode, starts_line, argval, is_jump_target, offset, argrepr")\n\n\ndef iter_instructions(co):\n iter_in = dis.Bytecode(co)\n iter_in = list(iter_in)\n\n bytecode_to_instruction = {}\n for instruction in iter_in:\n bytecode_to_instruction[instruction.offset] = instruction\n\n if iter_in:\n for instruction in iter_in:\n yield instruction\n\n\ndef collect_return_info(co, use_func_first_line=False):\n if not hasattr(co, "co_lines") and not hasattr(co, "co_lnotab"):\n return []\n\n if use_func_first_line:\n firstlineno = co.co_firstlineno\n else:\n firstlineno = 0\n\n lst = []\n op_offset_to_line = dict(dis.findlinestarts(co))\n for instruction in iter_instructions(co):\n curr_op_name = instruction.opname\n if curr_op_name in ("RETURN_VALUE", "RETURN_CONST"):\n lst.append(ReturnInfo(_get_line(op_offset_to_line, instruction.offset, firstlineno, search=True)))\n\n return lst\n\n\nif sys.version_info[:2] <= (3, 9):\n\n class _TargetInfo(object):\n def __init__(self, except_end_instruction, jump_if_not_exc_instruction=None):\n self.except_end_instruction = except_end_instruction\n self.jump_if_not_exc_instruction = jump_if_not_exc_instruction\n\n def __str__(self):\n msg = ["_TargetInfo("]\n msg.append(self.except_end_instruction.opname)\n if self.jump_if_not_exc_instruction:\n msg.append(" - ")\n msg.append(self.jump_if_not_exc_instruction.opname)\n msg.append("(")\n msg.append(str(self.jump_if_not_exc_instruction.argval))\n msg.append(")")\n msg.append(")")\n return "".join(msg)\n\n def _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx):\n next_3 = [\n j_instruction.opname for j_instruction in instructions[exception_end_instruction_index : exception_end_instruction_index + 3]\n ]\n # print('next_3:', [(j_instruction.opname, j_instruction.argval) for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]])\n if next_3 == 
["POP_TOP", "POP_TOP", "POP_TOP"]: # try..except without checking exception.\n try:\n jump_instruction = instructions[exception_end_instruction_index - 1]\n if jump_instruction.opname not in ("JUMP_FORWARD", "JUMP_ABSOLUTE"):\n return None\n except IndexError:\n pass\n\n if jump_instruction.opname == "JUMP_ABSOLUTE":\n # On latest versions of Python 3 the interpreter has a go-backwards step,\n # used to show the initial line of a for/while, etc (which is this\n # JUMP_ABSOLUTE)... we're not really interested in it, but rather on where\n # it points to.\n except_end_instruction = instructions[offset_to_instruction_idx[jump_instruction.argval]]\n idx = offset_to_instruction_idx[except_end_instruction.argval]\n # Search for the POP_EXCEPT which should be at the end of the block.\n for pop_except_instruction in reversed(instructions[:idx]):\n if pop_except_instruction.opname == "POP_EXCEPT":\n except_end_instruction = pop_except_instruction\n return _TargetInfo(except_end_instruction)\n else:\n return None # i.e.: Continue outer loop\n\n else:\n # JUMP_FORWARD\n i = offset_to_instruction_idx[jump_instruction.argval]\n try:\n # i.e.: the jump is to the instruction after the block finishes (so, we need to\n # get the previous instruction as that should be the place where the exception\n # block finishes).\n except_end_instruction = instructions[i - 1]\n except:\n pydev_log.critical("Error when computing try..except block end.")\n return None\n return _TargetInfo(except_end_instruction)\n\n elif next_3 and next_3[0] == "DUP_TOP": # try..except AssertionError.\n iter_in = instructions[exception_end_instruction_index + 1 :]\n for j, jump_if_not_exc_instruction in enumerate(iter_in):\n if jump_if_not_exc_instruction.opname == "JUMP_IF_NOT_EXC_MATCH":\n # Python 3.9\n except_end_instruction = instructions[offset_to_instruction_idx[jump_if_not_exc_instruction.argval]]\n return _TargetInfo(except_end_instruction, jump_if_not_exc_instruction)\n\n elif 
jump_if_not_exc_instruction.opname == "COMPARE_OP" and jump_if_not_exc_instruction.argval == "exception match":\n # Python 3.8 and before\n try:\n next_instruction = iter_in[j + 1]\n except:\n continue\n if next_instruction.opname == "POP_JUMP_IF_FALSE":\n except_end_instruction = instructions[offset_to_instruction_idx[next_instruction.argval]]\n return _TargetInfo(except_end_instruction, next_instruction)\n else:\n return None # i.e.: Continue outer loop\n\n else:\n # i.e.: we're not interested in try..finally statements, only try..except.\n return None\n\n def collect_try_except_info(co, use_func_first_line=False):\n # We no longer have 'END_FINALLY', so, we need to do things differently in Python 3.9\n if not hasattr(co, "co_lines") and not hasattr(co, "co_lnotab"):\n return []\n\n if use_func_first_line:\n firstlineno = co.co_firstlineno\n else:\n firstlineno = 0\n\n try_except_info_lst = []\n\n op_offset_to_line = dict(entry for entry in dis.findlinestarts(co) if entry[1] is not None)\n\n offset_to_instruction_idx = {}\n\n instructions = list(iter_instructions(co))\n\n for i, instruction in enumerate(instructions):\n offset_to_instruction_idx[instruction.offset] = i\n\n for i, instruction in enumerate(instructions):\n curr_op_name = instruction.opname\n if curr_op_name in ("SETUP_FINALLY", "SETUP_EXCEPT"): # SETUP_EXCEPT before Python 3.8, SETUP_FINALLY Python 3.8 onwards.\n exception_end_instruction_index = offset_to_instruction_idx[instruction.argval]\n\n jump_instruction = instructions[exception_end_instruction_index - 1]\n if jump_instruction.opname not in ("JUMP_FORWARD", "JUMP_ABSOLUTE"):\n continue\n\n except_end_instruction = None\n indexes_checked = set()\n indexes_checked.add(exception_end_instruction_index)\n target_info = _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx)\n while target_info is not None:\n # Handle a try..except..except..except.\n jump_instruction = 
target_info.jump_if_not_exc_instruction\n except_end_instruction = target_info.except_end_instruction\n\n if jump_instruction is not None:\n check_index = offset_to_instruction_idx[jump_instruction.argval]\n if check_index in indexes_checked:\n break\n indexes_checked.add(check_index)\n target_info = _get_except_target_info(instructions, check_index, offset_to_instruction_idx)\n else:\n break\n\n if except_end_instruction is not None:\n try_except_info = TryExceptInfo(\n _get_line(op_offset_to_line, instruction.offset, firstlineno, search=True), ignore=False\n )\n try_except_info.except_bytecode_offset = instruction.argval\n try_except_info.except_line = _get_line(\n op_offset_to_line, try_except_info.except_bytecode_offset, firstlineno, search=True\n )\n\n try_except_info.except_end_bytecode_offset = except_end_instruction.offset\n try_except_info.except_end_line = _get_line(op_offset_to_line, except_end_instruction.offset, firstlineno, search=True)\n try_except_info_lst.append(try_except_info)\n\n for raise_instruction in instructions[i : offset_to_instruction_idx[try_except_info.except_end_bytecode_offset]]:\n if raise_instruction.opname == "RAISE_VARARGS":\n if raise_instruction.argval == 0:\n try_except_info.raise_lines_in_except.append(\n _get_line(op_offset_to_line, raise_instruction.offset, firstlineno, search=True)\n )\n\n return try_except_info_lst\n\nelif sys.version_info[:2] == (3, 10):\n\n class _TargetInfo(object):\n def __init__(self, except_end_instruction, jump_if_not_exc_instruction=None):\n self.except_end_instruction = except_end_instruction\n self.jump_if_not_exc_instruction = jump_if_not_exc_instruction\n\n def __str__(self):\n msg = ["_TargetInfo("]\n msg.append(self.except_end_instruction.opname)\n if self.jump_if_not_exc_instruction:\n msg.append(" - ")\n msg.append(self.jump_if_not_exc_instruction.opname)\n msg.append("(")\n msg.append(str(self.jump_if_not_exc_instruction.argval))\n msg.append(")")\n msg.append(")")\n return 
"".join(msg)\n\n def _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx):\n next_3 = [\n j_instruction.opname for j_instruction in instructions[exception_end_instruction_index : exception_end_instruction_index + 3]\n ]\n # print('next_3:', [(j_instruction.opname, j_instruction.argval) for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]])\n if next_3 == ["POP_TOP", "POP_TOP", "POP_TOP"]: # try..except without checking exception.\n # Previously there was a jump which was able to point where the exception would end. This\n # is no longer true, now a bare except doesn't really have any indication in the bytecode\n # where the end would be expected if the exception wasn't raised, so, we just blindly\n # search for a POP_EXCEPT from the current position.\n for pop_except_instruction in instructions[exception_end_instruction_index + 3 :]:\n if pop_except_instruction.opname == "POP_EXCEPT":\n except_end_instruction = pop_except_instruction\n return _TargetInfo(except_end_instruction)\n\n elif next_3 and next_3[0] == "DUP_TOP": # try..except AssertionError.\n iter_in = instructions[exception_end_instruction_index + 1 :]\n for jump_if_not_exc_instruction in iter_in:\n if jump_if_not_exc_instruction.opname == "JUMP_IF_NOT_EXC_MATCH":\n # Python 3.9\n except_end_instruction = instructions[offset_to_instruction_idx[jump_if_not_exc_instruction.argval]]\n return _TargetInfo(except_end_instruction, jump_if_not_exc_instruction)\n else:\n return None # i.e.: Continue outer loop\n\n else:\n # i.e.: we're not interested in try..finally statements, only try..except.\n return None\n\n def collect_try_except_info(co, use_func_first_line=False):\n # We no longer have 'END_FINALLY', so, we need to do things differently in Python 3.9\n if not hasattr(co, "co_lines") and not hasattr(co, "co_lnotab"):\n return []\n\n if use_func_first_line:\n firstlineno = co.co_firstlineno\n else:\n firstlineno = 
0\n\n try_except_info_lst = []\n\n op_offset_to_line = dict(entry for entry in dis.findlinestarts(co) if entry[1] is not None)\n\n offset_to_instruction_idx = {}\n\n instructions = list(iter_instructions(co))\n\n for i, instruction in enumerate(instructions):\n offset_to_instruction_idx[instruction.offset] = i\n\n for i, instruction in enumerate(instructions):\n curr_op_name = instruction.opname\n if curr_op_name == "SETUP_FINALLY":\n exception_end_instruction_index = offset_to_instruction_idx[instruction.argval]\n\n jump_instruction = instructions[exception_end_instruction_index]\n if jump_instruction.opname != "DUP_TOP":\n continue\n\n except_end_instruction = None\n indexes_checked = set()\n indexes_checked.add(exception_end_instruction_index)\n target_info = _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx)\n while target_info is not None:\n # Handle a try..except..except..except.\n jump_instruction = target_info.jump_if_not_exc_instruction\n except_end_instruction = target_info.except_end_instruction\n\n if jump_instruction is not None:\n check_index = offset_to_instruction_idx[jump_instruction.argval]\n if check_index in indexes_checked:\n break\n indexes_checked.add(check_index)\n target_info = _get_except_target_info(instructions, check_index, offset_to_instruction_idx)\n else:\n break\n\n if except_end_instruction is not None:\n try_except_info = TryExceptInfo(\n _get_line(op_offset_to_line, instruction.offset, firstlineno, search=True), ignore=False\n )\n try_except_info.except_bytecode_offset = instruction.argval\n try_except_info.except_line = _get_line(\n op_offset_to_line, try_except_info.except_bytecode_offset, firstlineno, search=True\n )\n\n try_except_info.except_end_bytecode_offset = except_end_instruction.offset\n\n # On Python 3.10 the final line of the except end isn't really correct, rather,\n # it's engineered to be the same line of the except and not the end line of the\n # block, so, the 
approach taken is to search for the biggest line between the\n # except and the end instruction\n except_end_line = -1\n start_i = offset_to_instruction_idx[try_except_info.except_bytecode_offset]\n end_i = offset_to_instruction_idx[except_end_instruction.offset]\n for instruction in instructions[start_i : end_i + 1]:\n found_at_line = op_offset_to_line.get(instruction.offset)\n if found_at_line is not None and found_at_line > except_end_line:\n except_end_line = found_at_line\n try_except_info.except_end_line = except_end_line - firstlineno\n\n try_except_info_lst.append(try_except_info)\n\n for raise_instruction in instructions[i : offset_to_instruction_idx[try_except_info.except_end_bytecode_offset]]:\n if raise_instruction.opname == "RAISE_VARARGS":\n if raise_instruction.argval == 0:\n try_except_info.raise_lines_in_except.append(\n _get_line(op_offset_to_line, raise_instruction.offset, firstlineno, search=True)\n )\n\n return try_except_info_lst\n\nelif sys.version_info[:2] >= (3, 11):\n\n def collect_try_except_info(co, use_func_first_line=False):\n """\n Note: if the filename is available and we can get the source,\n `collect_try_except_info_from_source` is preferred (this is kept as\n a fallback for cases where sources aren't available).\n """\n return []\n\n\nimport ast as ast_module\n\n\nclass _Visitor(ast_module.NodeVisitor):\n def __init__(self):\n self.try_except_infos = []\n self._stack = []\n self._in_except_stack = []\n self.max_line = -1\n\n def generic_visit(self, node):\n if hasattr(node, "lineno"):\n if node.lineno > self.max_line:\n self.max_line = node.lineno\n return ast_module.NodeVisitor.generic_visit(self, node)\n\n def visit_Try(self, node):\n info = TryExceptInfo(node.lineno, ignore=True)\n self._stack.append(info)\n self.generic_visit(node)\n assert info is self._stack.pop()\n if not info.ignore:\n self.try_except_infos.insert(0, info)\n\n if sys.version_info[0] < 3:\n visit_TryExcept = visit_Try\n\n def visit_ExceptHandler(self, 
node):\n info = self._stack[-1]\n info.ignore = False\n if info.except_line == -1:\n info.except_line = node.lineno\n self._in_except_stack.append(info)\n self.generic_visit(node)\n if hasattr(node, "end_lineno"):\n info.except_end_line = node.end_lineno\n else:\n info.except_end_line = self.max_line\n self._in_except_stack.pop()\n\n if sys.version_info[0] >= 3:\n\n def visit_Raise(self, node):\n for info in self._in_except_stack:\n if node.exc is None:\n info.raise_lines_in_except.append(node.lineno)\n self.generic_visit(node)\n\n else:\n\n def visit_Raise(self, node):\n for info in self._in_except_stack:\n if node.type is None and node.tback is None:\n info.raise_lines_in_except.append(node.lineno)\n self.generic_visit(node)\n\n\ndef collect_try_except_info_from_source(filename):\n with open(filename, "rb") as stream:\n contents = stream.read()\n return collect_try_except_info_from_contents(contents, filename)\n\n\ndef collect_try_except_info_from_contents(contents, filename="<unknown>"):\n ast = ast_module.parse(contents, filename)\n visitor = _Visitor()\n visitor.visit(ast)\n return visitor.try_except_infos\n\n\nRESTART_FROM_LOOKAHEAD = object()\nSEPARATOR = object()\n\n\nclass _MsgPart(object):\n def __init__(self, line, tok):\n assert line >= 0\n self.line = line\n self.tok = tok\n\n def __str__(self) -> str:\n return "_MsgPart(line: %s tok: %s)" % (self.line, self.tok)\n\n __repr__ = __str__\n\n @classmethod\n def add_to_line_to_contents(cls, obj, line_to_contents, line=None):\n if isinstance(obj, (list, tuple)):\n for o in obj:\n cls.add_to_line_to_contents(o, line_to_contents, line=line)\n return\n\n if isinstance(obj, str):\n assert line is not None\n line = int(line)\n lst = line_to_contents.setdefault(line, [])\n lst.append(obj)\n return\n\n if isinstance(obj, _MsgPart):\n if isinstance(obj.tok, (list, tuple)):\n cls.add_to_line_to_contents(obj.tok, line_to_contents, line=obj.line)\n return\n\n if isinstance(obj.tok, str):\n lst = 
line_to_contents.setdefault(obj.line, [])\n lst.append(obj.tok)\n return\n\n raise AssertionError("Unhandled: %" % (obj,))\n\n\nclass _Disassembler(object):\n def __init__(self, co, firstlineno, level=0):\n self.co = co\n self.firstlineno = firstlineno\n self.level = level\n self.instructions = list(iter_instructions(co))\n op_offset_to_line = self.op_offset_to_line = dict(entry for entry in dis.findlinestarts(co) if entry[1] is not None)\n\n # Update offsets so that all offsets have the line index (and update it based on\n # the passed firstlineno).\n line_index = co.co_firstlineno - firstlineno\n for instruction in self.instructions:\n new_line_index = op_offset_to_line.get(instruction.offset)\n if new_line_index is not None:\n line_index = new_line_index - firstlineno\n op_offset_to_line[instruction.offset] = line_index\n else:\n op_offset_to_line[instruction.offset] = line_index\n\n BIG_LINE_INT = 9999999\n SMALL_LINE_INT = -1\n\n def min_line(self, *args):\n m = self.BIG_LINE_INT\n for arg in args:\n if isinstance(arg, (list, tuple)):\n m = min(m, self.min_line(*arg))\n\n elif isinstance(arg, _MsgPart):\n m = min(m, arg.line)\n\n elif hasattr(arg, "offset"):\n m = min(m, self.op_offset_to_line[arg.offset])\n return m\n\n def max_line(self, *args):\n m = self.SMALL_LINE_INT\n for arg in args:\n if isinstance(arg, (list, tuple)):\n m = max(m, self.max_line(*arg))\n\n elif isinstance(arg, _MsgPart):\n m = max(m, arg.line)\n\n elif hasattr(arg, "offset"):\n m = max(m, self.op_offset_to_line[arg.offset])\n return m\n\n def _lookahead(self):\n """\n This handles and converts some common constructs from bytecode to actual source code.\n\n It may change the list of instructions.\n """\n msg = self._create_msg_part\n found = []\n fullrepr = None\n\n # Collect all the load instructions\n for next_instruction in self.instructions:\n if next_instruction.opname in ("LOAD_GLOBAL", "LOAD_FAST", "LOAD_CONST", "LOAD_NAME"):\n found.append(next_instruction)\n else:\n break\n\n 
if not found:\n return None\n\n if next_instruction.opname == "LOAD_ATTR":\n prev_instruction = found[-1]\n # Remove the current LOAD_ATTR\n assert self.instructions.pop(len(found)) is next_instruction\n\n # Add the LOAD_ATTR to the previous LOAD\n self.instructions[len(found) - 1] = _Instruction(\n prev_instruction.opname,\n prev_instruction.opcode,\n prev_instruction.starts_line,\n prev_instruction.argval,\n False, # prev_instruction.is_jump_target,\n prev_instruction.offset,\n (msg(prev_instruction), msg(prev_instruction, "."), msg(next_instruction)),\n )\n return RESTART_FROM_LOOKAHEAD\n\n if next_instruction.opname in ("CALL_FUNCTION", "PRECALL", "CALL"):\n if len(found) == next_instruction.argval + 1:\n force_restart = False\n delta = 0\n else:\n force_restart = True\n if len(found) > next_instruction.argval + 1:\n delta = len(found) - (next_instruction.argval + 1)\n else:\n return None # This is odd\n\n del_upto = delta + next_instruction.argval + 2 # +2 = NAME / CALL_FUNCTION\n if next_instruction.opname == "PRECALL":\n del_upto += 1 # Also remove the CALL right after the PRECALL.\n del self.instructions[delta:del_upto]\n\n found = iter(found[delta:])\n call_func = next(found)\n args = list(found)\n fullrepr = [\n msg(call_func),\n msg(call_func, "("),\n ]\n prev = call_func\n for i, arg in enumerate(args):\n if i > 0:\n fullrepr.append(msg(prev, ", "))\n prev = arg\n fullrepr.append(msg(arg))\n\n fullrepr.append(msg(prev, ")"))\n\n if force_restart:\n self.instructions.insert(\n delta,\n _Instruction(\n call_func.opname,\n call_func.opcode,\n call_func.starts_line,\n call_func.argval,\n False, # call_func.is_jump_target,\n call_func.offset,\n tuple(fullrepr),\n ),\n )\n return RESTART_FROM_LOOKAHEAD\n\n elif next_instruction.opname == "BUILD_TUPLE":\n if len(found) == next_instruction.argval:\n force_restart = False\n delta = 0\n else:\n force_restart = True\n if len(found) > next_instruction.argval:\n delta = len(found) - (next_instruction.argval)\n 
else:\n return None # This is odd\n\n del self.instructions[delta : delta + next_instruction.argval + 1] # +1 = BUILD_TUPLE\n\n found = iter(found[delta:])\n\n args = [instruction for instruction in found]\n if args:\n first_instruction = args[0]\n else:\n first_instruction = next_instruction\n prev = first_instruction\n\n fullrepr = []\n fullrepr.append(msg(prev, "("))\n for i, arg in enumerate(args):\n if i > 0:\n fullrepr.append(msg(prev, ", "))\n prev = arg\n fullrepr.append(msg(arg))\n\n fullrepr.append(msg(prev, ")"))\n\n if force_restart:\n self.instructions.insert(\n delta,\n _Instruction(\n first_instruction.opname,\n first_instruction.opcode,\n first_instruction.starts_line,\n first_instruction.argval,\n False, # first_instruction.is_jump_target,\n first_instruction.offset,\n tuple(fullrepr),\n ),\n )\n return RESTART_FROM_LOOKAHEAD\n\n if fullrepr is not None and self.instructions:\n if self.instructions[0].opname == "POP_TOP":\n self.instructions.pop(0)\n\n if self.instructions[0].opname in ("STORE_FAST", "STORE_NAME"):\n next_instruction = self.instructions.pop(0)\n return msg(next_instruction), msg(next_instruction, " = "), fullrepr\n\n if self.instructions[0].opname == "RETURN_VALUE":\n next_instruction = self.instructions.pop(0)\n return msg(next_instruction, "return ", line=self.min_line(next_instruction, fullrepr)), fullrepr\n\n return fullrepr\n\n def _decorate_jump_target(self, instruction, instruction_repr):\n if instruction.is_jump_target:\n return ("|", str(instruction.offset), "|", instruction_repr)\n\n return instruction_repr\n\n def _create_msg_part(self, instruction, tok=None, line=None):\n dec = self._decorate_jump_target\n if line is None or line in (self.BIG_LINE_INT, self.SMALL_LINE_INT):\n line = self.op_offset_to_line[instruction.offset]\n\n argrepr = instruction.argrepr\n if isinstance(argrepr, str) and argrepr.startswith("NULL + "):\n argrepr = argrepr[7:]\n if isinstance(argrepr, str) and argrepr.endswith("+ NULL"):\n argrepr = 
argrepr[:-7]\n return _MsgPart(line, tok if tok is not None else dec(instruction, argrepr))\n\n def _next_instruction_to_str(self, line_to_contents):\n # indent = ''\n # if self.level > 0:\n # indent += ' ' * self.level\n # print(indent, 'handle', self.instructions[0])\n\n if self.instructions:\n ret = self._lookahead()\n if ret:\n return ret\n\n msg = self._create_msg_part\n\n instruction = self.instructions.pop(0)\n\n if instruction.opname in ("RESUME", "NULL"):\n return None\n\n if instruction.opname == "RETURN_CONST":\n return (msg(instruction, "return ", line=self.min_line(instruction)), msg(instruction))\n\n if instruction.opname in ("LOAD_GLOBAL", "LOAD_FAST", "LOAD_CONST", "LOAD_NAME"):\n next_instruction = self.instructions[0]\n if next_instruction.opname in ("STORE_FAST", "STORE_NAME"):\n self.instructions.pop(0)\n return (msg(next_instruction), msg(next_instruction, " = "), msg(instruction))\n\n if next_instruction.opname == "RETURN_VALUE":\n self.instructions.pop(0)\n return (msg(instruction, "return ", line=self.min_line(instruction)), msg(instruction))\n\n if next_instruction.opname == "RAISE_VARARGS" and next_instruction.argval == 1:\n self.instructions.pop(0)\n return (msg(instruction, "raise ", line=self.min_line(instruction)), msg(instruction))\n\n if instruction.opname == "LOAD_CONST":\n if inspect.iscode(instruction.argval):\n code_line_to_contents = _Disassembler(instruction.argval, self.firstlineno, self.level + 1).build_line_to_contents()\n\n for contents in code_line_to_contents.values():\n contents.insert(0, " ")\n for line, contents in code_line_to_contents.items():\n line_to_contents.setdefault(line, []).extend(contents)\n return msg(instruction, "LOAD_CONST(code)")\n\n if instruction.opname == "RAISE_VARARGS":\n if instruction.argval == 0:\n return msg(instruction, "raise")\n\n if instruction.opname == "SETUP_FINALLY":\n return msg(instruction, ("try(", instruction.argrepr, "):"))\n\n if instruction.argrepr:\n return msg(instruction, 
(instruction.opname, "(", instruction.argrepr, ")"))\n\n if instruction.argval:\n return msg(\n instruction,\n "%s{%s}"\n % (\n instruction.opname,\n instruction.argval,\n ),\n )\n\n return msg(instruction, instruction.opname)\n\n def build_line_to_contents(self):\n # print('----')\n # for instruction in self.instructions:\n # print(instruction)\n # print('----\n\n')\n\n line_to_contents = {}\n\n instructions = self.instructions\n while instructions:\n s = self._next_instruction_to_str(line_to_contents)\n if s is RESTART_FROM_LOOKAHEAD:\n continue\n if s is None:\n continue\n\n _MsgPart.add_to_line_to_contents(s, line_to_contents)\n m = self.max_line(s)\n if m != self.SMALL_LINE_INT:\n line_to_contents.setdefault(m, []).append(SEPARATOR)\n return line_to_contents\n\n def disassemble(self):\n line_to_contents = self.build_line_to_contents()\n stream = StringIO()\n last_line = 0\n show_lines = False\n for line, contents in sorted(line_to_contents.items()):\n while last_line < line - 1:\n if show_lines:\n stream.write("%s.\n" % (last_line + 1,))\n else:\n stream.write("\n")\n last_line += 1\n\n if show_lines:\n stream.write("%s. 
" % (line,))\n\n for i, content in enumerate(contents):\n if content == SEPARATOR:\n if i != len(contents) - 1:\n stream.write(", ")\n else:\n stream.write(content)\n\n stream.write("\n")\n\n last_line = line\n\n return stream.getvalue()\n\n\ndef code_to_bytecode_representation(co, use_func_first_line=False):\n """\n A simple disassemble of bytecode.\n\n It does not attempt to provide the full Python source code, rather, it provides a low-level\n representation of the bytecode, respecting the lines (so, its target is making the bytecode\n easier to grasp and not providing the original source code).\n\n Note that it does show jump locations/targets and converts some common bytecode constructs to\n Python code to make it a bit easier to understand.\n """\n # Reference for bytecodes:\n # https://docs.python.org/3/library/dis.html\n if use_func_first_line:\n firstlineno = co.co_firstlineno\n else:\n firstlineno = 0\n\n return _Disassembler(co, firstlineno).disassemble()\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_collect_bytecode_info.py
|
pydevd_collect_bytecode_info.py
|
Python
| 36,003 | 0.95 | 0.256586 | 0.062857 |
react-lib
| 556 |
2025-02-07T06:18:00.894106
|
MIT
| false |
7fd5d4dc8c87e9d95c6857d81ade1ef7
|
import os\nimport sys\n\n\nclass ArgHandlerWithParam:\n """\n Handler for some arguments which needs a value\n """\n\n def __init__(self, arg_name, convert_val=None, default_val=None):\n self.arg_name = arg_name\n self.arg_v_rep = "--%s" % (arg_name,)\n self.convert_val = convert_val\n self.default_val = default_val\n\n def to_argv(self, lst, setup):\n v = setup.get(self.arg_name)\n if v is not None and v != self.default_val:\n lst.append(self.arg_v_rep)\n lst.append("%s" % (v,))\n\n def handle_argv(self, argv, i, setup):\n assert argv[i] == self.arg_v_rep\n del argv[i]\n\n val = argv[i]\n if self.convert_val:\n val = self.convert_val(val)\n\n setup[self.arg_name] = val\n del argv[i]\n\n\nclass ArgHandlerBool:\n """\n If a given flag is received, mark it as 'True' in setup.\n """\n\n def __init__(self, arg_name, default_val=False):\n self.arg_name = arg_name\n self.arg_v_rep = "--%s" % (arg_name,)\n self.default_val = default_val\n\n def to_argv(self, lst, setup):\n v = setup.get(self.arg_name)\n if v:\n lst.append(self.arg_v_rep)\n\n def handle_argv(self, argv, i, setup):\n assert argv[i] == self.arg_v_rep\n del argv[i]\n setup[self.arg_name] = True\n\n\ndef convert_ppid(ppid):\n ret = int(ppid)\n if ret != 0:\n if ret == os.getpid():\n raise AssertionError("ppid passed is the same as the current process pid (%s)!" 
% (ret,))\n return ret\n\n\nACCEPTED_ARG_HANDLERS = [\n ArgHandlerWithParam("port", int, 0),\n ArgHandlerWithParam("ppid", convert_ppid, 0),\n ArgHandlerWithParam("vm_type"),\n ArgHandlerWithParam("client"),\n ArgHandlerWithParam("access-token"),\n ArgHandlerWithParam("client-access-token"),\n ArgHandlerWithParam("debug-mode"),\n ArgHandlerWithParam("preimport"),\n # Logging\n ArgHandlerWithParam("log-file"),\n ArgHandlerWithParam("log-level", int, None),\n ArgHandlerBool("server"),\n ArgHandlerBool("multiproc"), # Used by PyCharm (reuses connection: ssh tunneling)\n ArgHandlerBool("multiprocess"), # Used by PyDev (creates new connection to ide)\n ArgHandlerBool("save-signatures"),\n ArgHandlerBool("save-threading"),\n ArgHandlerBool("save-asyncio"),\n ArgHandlerBool("print-in-debugger-startup"),\n ArgHandlerBool("cmd-line"),\n ArgHandlerBool("module"),\n ArgHandlerBool("skip-notify-stdin"),\n # The ones below should've been just one setting to specify the protocol, but for compatibility\n # reasons they're passed as a flag but are mutually exclusive.\n ArgHandlerBool("json-dap"), # Protocol used by ptvsd to communicate with pydevd (a single json message in each read)\n ArgHandlerBool("json-dap-http"), # Actual DAP (json messages over http protocol).\n ArgHandlerBool("protocol-quoted-line"), # Custom protocol with quoted lines.\n ArgHandlerBool("protocol-http"), # Custom protocol with http.\n]\n\nARGV_REP_TO_HANDLER = {}\nfor handler in ACCEPTED_ARG_HANDLERS:\n ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler\n\n\ndef get_pydevd_file():\n import pydevd\n\n f = pydevd.__file__\n if f.endswith(".pyc"):\n f = f[:-1]\n elif f.endswith("$py.class"):\n f = f[: -len("$py.class")] + ".py"\n return f\n\n\ndef setup_to_argv(setup, skip_names=None):\n """\n :param dict setup:\n A dict previously gotten from process_command_line.\n\n :param set skip_names:\n The names in the setup which shouldn't be converted to argv.\n\n :note: does not handle --file nor --DEBUG.\n """\n if 
skip_names is None:\n skip_names = set()\n ret = [get_pydevd_file()]\n\n for handler in ACCEPTED_ARG_HANDLERS:\n if handler.arg_name in setup and handler.arg_name not in skip_names:\n handler.to_argv(ret, setup)\n return ret\n\n\ndef process_command_line(argv):\n """parses the arguments.\n removes our arguments from the command line"""\n setup = {}\n for handler in ACCEPTED_ARG_HANDLERS:\n setup[handler.arg_name] = handler.default_val\n setup["file"] = ""\n setup["qt-support"] = ""\n\n initial_argv = tuple(argv)\n\n i = 0\n del argv[0]\n while i < len(argv):\n handler = ARGV_REP_TO_HANDLER.get(argv[i])\n if handler is not None:\n handler.handle_argv(argv, i, setup)\n\n elif argv[i].startswith("--qt-support"):\n # The --qt-support is special because we want to keep backward compatibility:\n # Previously, just passing '--qt-support' meant that we should use the auto-discovery mode\n # whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where\n # mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside', 'pyside2'.\n if argv[i] == "--qt-support":\n setup["qt-support"] = "auto"\n\n elif argv[i].startswith("--qt-support="):\n qt_support = argv[i][len("--qt-support=") :]\n valid_modes = ("none", "auto", "pyqt5", "pyqt4", "pyside", "pyside2")\n if qt_support not in valid_modes:\n raise ValueError("qt-support mode invalid: " + qt_support)\n if qt_support == "none":\n # On none, actually set an empty string to evaluate to False.\n setup["qt-support"] = ""\n else:\n setup["qt-support"] = qt_support\n else:\n raise ValueError("Unexpected definition for qt-support flag: " + argv[i])\n\n del argv[i]\n\n elif argv[i] == "--file":\n # --file is special because it's the last one (so, no handler for it).\n del argv[i]\n setup["file"] = argv[i]\n i = len(argv) # pop out, file is our last argument\n\n elif argv[i] == "--DEBUG":\n sys.stderr.write("pydevd: --DEBUG parameter deprecated. 
Use `--debug-level=3` instead.\n")\n\n else:\n raise ValueError("Unexpected option: %s when processing: %s" % (argv[i], initial_argv))\n return setup\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_command_line_handling.py
|
pydevd_command_line_handling.py
|
Python
| 6,283 | 0.95 | 0.19337 | 0.061644 |
vue-tools
| 808 |
2024-05-03T04:22:44.313499
|
Apache-2.0
| false |
06bac600c1285adfe39e11547390b333
|
CMD_RUN = 101\nCMD_LIST_THREADS = 102\nCMD_THREAD_CREATE = 103\nCMD_THREAD_KILL = 104\nCMD_THREAD_SUSPEND = 105\nCMD_THREAD_RUN = 106\nCMD_STEP_INTO = 107\nCMD_STEP_OVER = 108\nCMD_STEP_RETURN = 109\nCMD_GET_VARIABLE = 110\nCMD_SET_BREAK = 111\nCMD_REMOVE_BREAK = 112\nCMD_EVALUATE_EXPRESSION = 113\nCMD_GET_FRAME = 114\nCMD_EXEC_EXPRESSION = 115\nCMD_WRITE_TO_CONSOLE = 116\nCMD_CHANGE_VARIABLE = 117\nCMD_RUN_TO_LINE = 118\nCMD_RELOAD_CODE = 119\nCMD_GET_COMPLETIONS = 120\n\n# Note: renumbered (conflicted on merge)\nCMD_CONSOLE_EXEC = 121\nCMD_ADD_EXCEPTION_BREAK = 122\nCMD_REMOVE_EXCEPTION_BREAK = 123\nCMD_LOAD_SOURCE = 124\nCMD_ADD_DJANGO_EXCEPTION_BREAK = 125\nCMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126\nCMD_SET_NEXT_STATEMENT = 127\nCMD_SMART_STEP_INTO = 128\nCMD_EXIT = 129\nCMD_SIGNATURE_CALL_TRACE = 130\n\nCMD_SET_PY_EXCEPTION = 131\nCMD_GET_FILE_CONTENTS = 132\nCMD_SET_PROPERTY_TRACE = 133\n# Pydev debug console commands\nCMD_EVALUATE_CONSOLE_EXPRESSION = 134\nCMD_RUN_CUSTOM_OPERATION = 135\nCMD_GET_BREAKPOINT_EXCEPTION = 136\nCMD_STEP_CAUGHT_EXCEPTION = 137\nCMD_SEND_CURR_EXCEPTION_TRACE = 138\nCMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139\nCMD_IGNORE_THROWN_EXCEPTION_AT = 140\nCMD_ENABLE_DONT_TRACE = 141\nCMD_SHOW_CONSOLE = 142\n\nCMD_GET_ARRAY = 143\nCMD_STEP_INTO_MY_CODE = 144\nCMD_GET_CONCURRENCY_EVENT = 145\nCMD_SHOW_RETURN_VALUES = 146\nCMD_INPUT_REQUESTED = 147\nCMD_GET_DESCRIPTION = 148\n\nCMD_PROCESS_CREATED = 149\nCMD_SHOW_CYTHON_WARNING = 150\nCMD_LOAD_FULL_VALUE = 151\n\nCMD_GET_THREAD_STACK = 152\n\n# This is mostly for unit-tests to diagnose errors on ci.\nCMD_THREAD_DUMP_TO_STDERR = 153\n\n# Sent from the client to signal that we should stop when we start executing user code.\nCMD_STOP_ON_START = 154\n\n# When the debugger is stopped in an exception, this command will provide the details of the current exception (in the current thread).\nCMD_GET_EXCEPTION_DETAILS = 155\n\n# Allows configuring pydevd settings (can be called multiple times and only 
keys\n# available in the json will be configured -- keys not passed will not change the\n# previous configuration).\nCMD_PYDEVD_JSON_CONFIG = 156\n\nCMD_THREAD_SUSPEND_SINGLE_NOTIFICATION = 157\nCMD_THREAD_RESUME_SINGLE_NOTIFICATION = 158\n\nCMD_STEP_OVER_MY_CODE = 159\nCMD_STEP_RETURN_MY_CODE = 160\n\nCMD_SET_PY_EXCEPTION_JSON = 161\nCMD_SET_PATH_MAPPING_JSON = 162\n\nCMD_GET_SMART_STEP_INTO_VARIANTS = 163 # XXX: PyCharm has 160 for this (we're currently incompatible anyways).\n\nCMD_REDIRECT_OUTPUT = 200\nCMD_GET_NEXT_STATEMENT_TARGETS = 201\nCMD_SET_PROJECT_ROOTS = 202\n\nCMD_MODULE_EVENT = 203\nCMD_PROCESS_EVENT = 204\n\nCMD_AUTHENTICATE = 205\n\nCMD_STEP_INTO_COROUTINE = 206\n\nCMD_LOAD_SOURCE_FROM_FRAME_ID = 207\n\nCMD_SET_FUNCTION_BREAK = 208\n\nCMD_VERSION = 501\nCMD_RETURN = 502\nCMD_SET_PROTOCOL = 503\nCMD_ERROR = 901\n\n# this number can be changed if there's need to do so\n# if the io is too big, we'll not send all (could make the debugger too non-responsive)\nMAX_IO_MSG_SIZE = 10000\n\nVERSION_STRING = "@@BUILD_NUMBER@@"\n\nfrom _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding\n\nfile_system_encoding = getfilesystemencoding()\nfilesystem_encoding_is_utf8 = file_system_encoding.lower() in ("utf-8", "utf_8", "utf8")\n\nID_TO_MEANING = {\n "101": "CMD_RUN",\n "102": "CMD_LIST_THREADS",\n "103": "CMD_THREAD_CREATE",\n "104": "CMD_THREAD_KILL",\n "105": "CMD_THREAD_SUSPEND",\n "106": "CMD_THREAD_RUN",\n "107": "CMD_STEP_INTO",\n "108": "CMD_STEP_OVER",\n "109": "CMD_STEP_RETURN",\n "110": "CMD_GET_VARIABLE",\n "111": "CMD_SET_BREAK",\n "112": "CMD_REMOVE_BREAK",\n "113": "CMD_EVALUATE_EXPRESSION",\n "114": "CMD_GET_FRAME",\n "115": "CMD_EXEC_EXPRESSION",\n "116": "CMD_WRITE_TO_CONSOLE",\n "117": "CMD_CHANGE_VARIABLE",\n "118": "CMD_RUN_TO_LINE",\n "119": "CMD_RELOAD_CODE",\n "120": "CMD_GET_COMPLETIONS",\n "121": "CMD_CONSOLE_EXEC",\n "122": "CMD_ADD_EXCEPTION_BREAK",\n "123": "CMD_REMOVE_EXCEPTION_BREAK",\n "124": "CMD_LOAD_SOURCE",\n 
"125": "CMD_ADD_DJANGO_EXCEPTION_BREAK",\n "126": "CMD_REMOVE_DJANGO_EXCEPTION_BREAK",\n "127": "CMD_SET_NEXT_STATEMENT",\n "128": "CMD_SMART_STEP_INTO",\n "129": "CMD_EXIT",\n "130": "CMD_SIGNATURE_CALL_TRACE",\n "131": "CMD_SET_PY_EXCEPTION",\n "132": "CMD_GET_FILE_CONTENTS",\n "133": "CMD_SET_PROPERTY_TRACE",\n "134": "CMD_EVALUATE_CONSOLE_EXPRESSION",\n "135": "CMD_RUN_CUSTOM_OPERATION",\n "136": "CMD_GET_BREAKPOINT_EXCEPTION",\n "137": "CMD_STEP_CAUGHT_EXCEPTION",\n "138": "CMD_SEND_CURR_EXCEPTION_TRACE",\n "139": "CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED",\n "140": "CMD_IGNORE_THROWN_EXCEPTION_AT",\n "141": "CMD_ENABLE_DONT_TRACE",\n "142": "CMD_SHOW_CONSOLE",\n "143": "CMD_GET_ARRAY",\n "144": "CMD_STEP_INTO_MY_CODE",\n "145": "CMD_GET_CONCURRENCY_EVENT",\n "146": "CMD_SHOW_RETURN_VALUES",\n "147": "CMD_INPUT_REQUESTED",\n "148": "CMD_GET_DESCRIPTION",\n "149": "CMD_PROCESS_CREATED", # Note: this is actually a notification of a sub-process created.\n "150": "CMD_SHOW_CYTHON_WARNING",\n "151": "CMD_LOAD_FULL_VALUE",\n "152": "CMD_GET_THREAD_STACK",\n "153": "CMD_THREAD_DUMP_TO_STDERR",\n "154": "CMD_STOP_ON_START",\n "155": "CMD_GET_EXCEPTION_DETAILS",\n "156": "CMD_PYDEVD_JSON_CONFIG",\n "157": "CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION",\n "158": "CMD_THREAD_RESUME_SINGLE_NOTIFICATION",\n "159": "CMD_STEP_OVER_MY_CODE",\n "160": "CMD_STEP_RETURN_MY_CODE",\n "161": "CMD_SET_PY_EXCEPTION_JSON",\n "162": "CMD_SET_PATH_MAPPING_JSON",\n "163": "CMD_GET_SMART_STEP_INTO_VARIANTS",\n "200": "CMD_REDIRECT_OUTPUT",\n "201": "CMD_GET_NEXT_STATEMENT_TARGETS",\n "202": "CMD_SET_PROJECT_ROOTS",\n "203": "CMD_MODULE_EVENT",\n "204": "CMD_PROCESS_EVENT", # DAP process event.\n "205": "CMD_AUTHENTICATE",\n "206": "CMD_STEP_INTO_COROUTINE",\n "207": "CMD_LOAD_SOURCE_FROM_FRAME_ID",\n "501": "CMD_VERSION",\n "502": "CMD_RETURN",\n "503": "CMD_SET_PROTOCOL",\n "901": "CMD_ERROR",\n}\n\n\ndef constant_to_str(constant):\n s = ID_TO_MEANING.get(str(constant))\n if not s:\n s = 
"<Unknown: %s>" % (constant,)\n return s\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_comm_constants.py
|
pydevd_comm_constants.py
|
Python
| 6,276 | 0.95 | 0.03 | 0.057803 |
vue-tools
| 302 |
2024-02-11T09:58:52.501158
|
GPL-3.0
| false |
78a8d3dbc7f101a7dc8d479f436ef68d
|
from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock\nfrom pydevd_file_utils import get_abs_path_real_path_and_base_from_frame\nfrom _pydev_bundle._pydev_saved_modules import thread, threading\nimport sys\nfrom _pydev_bundle import pydev_log\n\nDEBUG = False\n\n\nclass CustomFramesContainer:\n # Actual Values initialized later on.\n custom_frames_lock = None # : :type custom_frames_lock: threading.Lock\n\n custom_frames = None\n\n _next_frame_id = None\n\n _py_db_command_thread_event = None\n\n\ndef custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function)\n CustomFramesContainer.custom_frames_lock = ForkSafeLock()\n\n # custom_frames can only be accessed if properly locked with custom_frames_lock!\n # Key is a string identifying the frame (as well as the thread it belongs to).\n # Value is a CustomFrame.\n #\n CustomFramesContainer.custom_frames = {}\n\n # Only to be used in this module\n CustomFramesContainer._next_frame_id = 0\n\n # This is the event we must set to release an internal process events. 
It's later set by the actual debugger\n # when we do create the debugger.\n CustomFramesContainer._py_db_command_thread_event = Null()\n\n\n# Initialize it the first time (it may be reinitialized later on when dealing with a fork).\ncustom_frames_container_init()\n\n\nclass CustomFrame:\n def __init__(self, name, frame, thread_id):\n # 0 = string with the representation of that frame\n self.name = name\n\n # 1 = the frame to show\n self.frame = frame\n\n # 2 = an integer identifying the last time the frame was changed.\n self.mod_time = 0\n\n # 3 = the thread id of the given frame\n self.thread_id = thread_id\n\n\ndef add_custom_frame(frame, name, thread_id):\n """\n It's possible to show paused frames by adding a custom frame through this API (it's\n intended to be used for coroutines, but could potentially be used for generators too).\n\n :param frame:\n The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused.\n\n :param name:\n The name to be shown for the custom thread in the UI.\n\n :param thread_id:\n The thread id to which this frame is related (must match thread.ident).\n\n :return: str\n Returns the custom thread id which will be used to show the given frame paused.\n """\n with CustomFramesContainer.custom_frames_lock:\n curr_thread_id = get_current_thread_id(threading.current_thread())\n next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1\n\n # Note: the frame id kept contains an id and thread information on the thread where the frame was added\n # so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).\n frame_custom_thread_id = "__frame__:%s|%s" % (next_id, curr_thread_id)\n if DEBUG:\n sys.stderr.write(\n "add_custom_frame: %s (%s) %s %s\n"\n % (frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name)\n )\n\n 
CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id)\n CustomFramesContainer._py_db_command_thread_event.set()\n return frame_custom_thread_id\n\n\ndef update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):\n with CustomFramesContainer.custom_frames_lock:\n if DEBUG:\n sys.stderr.write("update_custom_frame: %s\n" % frame_custom_thread_id)\n try:\n old = CustomFramesContainer.custom_frames[frame_custom_thread_id]\n if name is not None:\n old.name = name\n old.mod_time += 1\n old.thread_id = thread_id\n except:\n sys.stderr.write("Unable to get frame to replace: %s\n" % (frame_custom_thread_id,))\n pydev_log.exception()\n\n CustomFramesContainer._py_db_command_thread_event.set()\n\n\ndef remove_custom_frame(frame_custom_thread_id):\n with CustomFramesContainer.custom_frames_lock:\n if DEBUG:\n sys.stderr.write("remove_custom_frame: %s\n" % frame_custom_thread_id)\n CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None)\n CustomFramesContainer._py_db_command_thread_event.set()\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_custom_frames.py
|
pydevd_custom_frames.py
|
Python
| 4,538 | 0.95 | 0.157895 | 0.180723 |
python-kit
| 830 |
2025-02-06T08:00:42.937093
|
MIT
| false |
ae7a209e76db6d1799b51e2e6c87f11c
|
cdef class PyDBAdditionalThreadInfo:\n cdef public int pydev_state\n cdef public object pydev_step_stop # Actually, it's a frame or None\n cdef public int pydev_original_step_cmd\n cdef public int pydev_step_cmd\n cdef public bint pydev_notify_kill\n cdef public object pydev_smart_step_stop # Actually, it's a frame or None\n cdef public bint pydev_django_resolve_frame\n cdef public object pydev_call_from_jinja2\n cdef public object pydev_call_inside_jinja2\n cdef public int is_tracing\n cdef public tuple conditional_breakpoint_exception\n cdef public str pydev_message\n cdef public int suspend_type\n cdef public int pydev_next_line\n cdef public str pydev_func_name\n cdef public bint suspended_at_unhandled\n cdef public str trace_suspend_type\n cdef public object top_level_thread_tracer_no_back_frames\n cdef public object top_level_thread_tracer_unhandled\n cdef public object thread_tracer\n cdef public object step_in_initial_location\n cdef public int pydev_smart_parent_offset\n cdef public int pydev_smart_child_offset\n cdef public tuple pydev_smart_step_into_variants\n cdef public dict target_id_to_smart_step_into_variant\n cdef public bint pydev_use_scoped_step_frame\n cdef public object weak_thread\n cdef public bint is_in_wait_loop\n \n cpdef get_topmost_frame(self, thread)\n cpdef update_stepping_info(self)\n \n # Private APIs\n cpdef object _get_related_thread(self)\n cpdef bint _is_stepping(self)\n\ncpdef set_additional_thread_info(thread)\n\ncpdef add_additional_info(PyDBAdditionalThreadInfo info)\ncpdef remove_additional_info(PyDBAdditionalThreadInfo info)\ncpdef bint any_thread_stepping()
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_cython.pxd
|
pydevd_cython.pxd
|
Other
| 1,732 | 0.95 | 0.02439 | 0.026316 |
vue-tools
| 755 |
2025-03-22T08:44:31.893532
|
MIT
| false |
ac97c5337bd8d04aae1a0a1eac82a86b
|
from __future__ import print_function\n\n# Important: Autogenerated file.\n\n# DO NOT edit manually!\n# DO NOT edit manually!\nfrom _pydevd_bundle.pydevd_constants import (\n STATE_RUN,\n PYTHON_SUSPEND,\n SUPPORT_GEVENT,\n ForkSafeLock,\n _current_frames,\n STATE_SUSPEND,\n get_global_debugger,\n get_thread_id,\n)\nfrom _pydev_bundle import pydev_log\nfrom _pydev_bundle._pydev_saved_modules import threading\nfrom _pydev_bundle.pydev_is_thread_alive import is_thread_alive\nimport weakref\n\nversion = 11\n\n\n# =======================================================================================================================\n# PyDBAdditionalThreadInfo\n# =======================================================================================================================\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef class PyDBAdditionalThreadInfo:\n# ELSE\n# class PyDBAdditionalThreadInfo(object):\n# ENDIF\n# fmt: on\n\n # Note: the params in cython are declared in pydevd_cython.pxd.\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n # ELSE\n# __slots__ = [\n# "pydev_state",\n# "pydev_step_stop",\n# "pydev_original_step_cmd",\n# "pydev_step_cmd",\n# "pydev_notify_kill",\n# "pydev_django_resolve_frame",\n# "pydev_call_from_jinja2",\n# "pydev_call_inside_jinja2",\n# "is_tracing",\n# "conditional_breakpoint_exception",\n# "pydev_message",\n# "suspend_type",\n# "pydev_next_line",\n# "pydev_func_name",\n# "suspended_at_unhandled",\n# "trace_suspend_type",\n# "top_level_thread_tracer_no_back_frames",\n# "top_level_thread_tracer_unhandled",\n# "thread_tracer",\n# "step_in_initial_location",\n# # Used for CMD_SMART_STEP_INTO (to know which smart step into variant to use)\n# "pydev_smart_parent_offset",\n# "pydev_smart_child_offset",\n# # Used for CMD_SMART_STEP_INTO (list[_pydevd_bundle.pydevd_bytecode_utils.Variant])\n# # Filled when the cmd_get_smart_step_into_variants is requested (so, 
this is a copy\n# # of the last request for a given thread and pydev_smart_parent_offset/pydev_smart_child_offset relies on it).\n# "pydev_smart_step_into_variants",\n# "target_id_to_smart_step_into_variant",\n# "pydev_use_scoped_step_frame",\n# "weak_thread",\n# "is_in_wait_loop",\n# ]\n # ENDIF\n # fmt: on\n\n def __init__(self):\n self.pydev_state = STATE_RUN # STATE_RUN or STATE_SUSPEND\n self.pydev_step_stop = None\n\n # Note: we have `pydev_original_step_cmd` and `pydev_step_cmd` because the original is to\n # say the action that started it and the other is to say what's the current tracing behavior\n # (because it's possible that we start with a step over but may have to switch to a\n # different step strategy -- for instance, if a step over is done and we return the current\n # method the strategy is changed to a step in).\n\n self.pydev_original_step_cmd = -1 # Something as CMD_STEP_INTO, CMD_STEP_OVER, etc.\n self.pydev_step_cmd = -1 # Something as CMD_STEP_INTO, CMD_STEP_OVER, etc.\n\n self.pydev_notify_kill = False\n self.pydev_django_resolve_frame = False\n self.pydev_call_from_jinja2 = None\n self.pydev_call_inside_jinja2 = None\n self.is_tracing = 0\n self.conditional_breakpoint_exception = None\n self.pydev_message = ""\n self.suspend_type = PYTHON_SUSPEND\n self.pydev_next_line = -1\n self.pydev_func_name = ".invalid." 
# Must match the type in cython\n self.suspended_at_unhandled = False\n self.trace_suspend_type = "trace" # 'trace' or 'frame_eval'\n self.top_level_thread_tracer_no_back_frames = []\n self.top_level_thread_tracer_unhandled = None\n self.thread_tracer = None\n self.step_in_initial_location = None\n self.pydev_smart_parent_offset = -1\n self.pydev_smart_child_offset = -1\n self.pydev_smart_step_into_variants = ()\n self.target_id_to_smart_step_into_variant = {}\n\n # Flag to indicate ipython use-case where each line will be executed as a call/line/return\n # in a new new frame but in practice we want to consider each new frame as if it was all\n # part of the same frame.\n #\n # In practice this means that a step over shouldn't revert to a step in and we need some\n # special logic to know when we should stop in a step over as we need to consider 2\n # different frames as being equal if they're logically the continuation of a frame\n # being executed by ipython line by line.\n #\n # See: https://github.com/microsoft/debugpy/issues/869#issuecomment-1132141003\n self.pydev_use_scoped_step_frame = False\n self.weak_thread = None\n\n # Purpose: detect if this thread is suspended and actually in the wait loop\n # at this time (otherwise it may be suspended but still didn't reach a point.\n # to pause).\n self.is_in_wait_loop = False\n\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cpdef object _get_related_thread(self):\n # ELSE\n# def _get_related_thread(self):\n # ENDIF\n # fmt: on\n if self.pydev_notify_kill: # Already killed\n return None\n\n if self.weak_thread is None:\n return None\n\n thread = self.weak_thread()\n if thread is None:\n return False\n\n if not is_thread_alive(thread):\n return None\n\n if thread._ident is None: # Can this happen?\n pydev_log.critical("thread._ident is None in _get_related_thread!")\n return None\n\n if threading._active.get(thread._ident) is not thread:\n return None\n\n return thread\n\n # 
fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cpdef bint _is_stepping(self):\n # ELSE\n# def _is_stepping(self):\n # ENDIF\n # fmt: on\n if self.pydev_state == STATE_RUN and self.pydev_step_cmd != -1:\n # This means actually stepping in a step operation.\n return True\n\n if self.pydev_state == STATE_SUSPEND and self.is_in_wait_loop:\n # This means stepping because it was suspended but still didn't\n # reach a suspension point.\n return True\n\n return False\n\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cpdef get_topmost_frame(self, thread):\n # ELSE\n# def get_topmost_frame(self, thread):\n # ENDIF\n # fmt: on\n """\n Gets the topmost frame for the given thread. Note that it may be None\n and callers should remove the reference to the frame as soon as possible\n to avoid disturbing user code.\n """\n # sys._current_frames(): dictionary with thread id -> topmost frame\n current_frames = _current_frames()\n topmost_frame = current_frames.get(thread._ident)\n if topmost_frame is None:\n # Note: this is expected for dummy threads (so, getting the topmost frame should be\n # treated as optional).\n pydev_log.info(\n "Unable to get topmost frame for thread: %s, thread.ident: %s, id(thread): %s\nCurrent frames: %s.\n" "GEVENT_SUPPORT: %s",\n thread,\n thread.ident,\n id(thread),\n current_frames,\n SUPPORT_GEVENT,\n )\n\n return topmost_frame\n\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cpdef update_stepping_info(self):\n # ELSE\n# def update_stepping_info(self):\n # ENDIF\n # fmt: on\n _update_stepping_info(self)\n\n def __str__(self):\n return "State:%s Stop:%s Cmd: %s Kill:%s" % (self.pydev_state, self.pydev_step_stop, self.pydev_step_cmd, self.pydev_notify_kill)\n\n\n_set_additional_thread_info_lock = ForkSafeLock()\n_next_additional_info = [PyDBAdditionalThreadInfo()]\n\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically 
generated)\ncpdef set_additional_thread_info(thread):\n# ELSE\n# def set_additional_thread_info(thread):\n# ENDIF\n# fmt: on\n try:\n additional_info = thread.additional_info\n if additional_info is None:\n raise AttributeError()\n except:\n with _set_additional_thread_info_lock:\n # If it's not there, set it within a lock to avoid any racing\n # conditions.\n try:\n additional_info = thread.additional_info\n except:\n additional_info = None\n\n if additional_info is None:\n # Note: don't call PyDBAdditionalThreadInfo constructor at this\n # point as it can piggy-back into the debugger which could\n # get here again, rather get the global ref which was pre-created\n # and add a new entry only after we set thread.additional_info.\n additional_info = _next_additional_info[0]\n thread.additional_info = additional_info\n additional_info.weak_thread = weakref.ref(thread)\n add_additional_info(additional_info)\n del _next_additional_info[:]\n _next_additional_info.append(PyDBAdditionalThreadInfo())\n\n return additional_info\n\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef set _all_infos\ncdef set _infos_stepping\ncdef object _update_infos_lock\n# ELSE\n# ENDIF\n# fmt: on\n\n_all_infos = set()\n_infos_stepping = set()\n_update_infos_lock = ForkSafeLock()\n\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef _update_stepping_info(PyDBAdditionalThreadInfo info):\n# ELSE\n# def _update_stepping_info(info):\n# ENDIF\n# fmt: on\n\n global _infos_stepping\n global _all_infos\n\n with _update_infos_lock:\n # Removes entries that are no longer valid.\n new_all_infos = set()\n for info in _all_infos:\n if info._get_related_thread() is not None:\n new_all_infos.add(info)\n _all_infos = new_all_infos\n\n new_stepping = set()\n for info in _all_infos:\n if info._is_stepping():\n new_stepping.add(info)\n _infos_stepping = new_stepping\n\n py_db = get_global_debugger()\n if py_db is not None and not 
py_db.pydb_disposed:\n thread = info.weak_thread()\n if thread is not None:\n thread_id = get_thread_id(thread)\n _queue, event = py_db.get_internal_queue_and_event(thread_id)\n event.set()\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncpdef add_additional_info(PyDBAdditionalThreadInfo info):\n# ELSE\n# def add_additional_info(info):\n# ENDIF\n# fmt: on\n with _update_infos_lock:\n _all_infos.add(info)\n if info._is_stepping():\n _infos_stepping.add(info)\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncpdef remove_additional_info(PyDBAdditionalThreadInfo info):\n# ELSE\n# def remove_additional_info(info):\n# ENDIF\n# fmt: on\n with _update_infos_lock:\n _all_infos.discard(info)\n _infos_stepping.discard(info)\n\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncpdef bint any_thread_stepping():\n# ELSE\n# def any_thread_stepping():\n# ENDIF\n# fmt: on\n return bool(_infos_stepping)\nimport linecache\nimport os.path\nimport re\n\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_bundle import pydevd_dont_trace\nfrom _pydevd_bundle.pydevd_constants import (\n RETURN_VALUES_DICT,\n NO_FTRACE,\n EXCEPTION_TYPE_HANDLED,\n EXCEPTION_TYPE_USER_UNHANDLED,\n PYDEVD_IPYTHON_CONTEXT,\n PYDEVD_USE_SYS_MONITORING,\n)\nfrom _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace\nfrom _pydevd_bundle.pydevd_utils import get_clsname_for_code\nfrom pydevd_file_utils import get_abs_path_real_path_and_base_from_frame\nfrom _pydevd_bundle.pydevd_comm_constants import constant_to_str, CMD_SET_FUNCTION_BREAK\nimport sys\n\ntry:\n from _pydevd_bundle.pydevd_bytecode_utils import get_smart_step_into_variant_from_frame_offset\nexcept ImportError:\n\n def get_smart_step_into_variant_from_frame_offset(*args, **kwargs):\n return None\n\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n# 
ELSE\n# # Note: those are now inlined on cython.\n# 107 = 107\n# 144 = 144\n# 109 = 109\n# 160 = 160\n# 108 = 108\n# 159 = 159\n# 137 = 137\n# 111 = 111\n# 128 = 128\n# 206 = 206\n# 1 = 1\n# 2 = 2\n# ENDIF\n\nbasename = os.path.basename\n\nIGNORE_EXCEPTION_TAG = re.compile("[^#]*#.*@IgnoreException")\nDEBUG_START = ("pydevd.py", "run")\nDEBUG_START_PY3K = ("_pydev_execfile.py", "execfile")\nTRACE_PROPERTY = "pydevd_traceproperty.py"\n\nimport dis\n\ntry:\n StopAsyncIteration\nexcept NameError:\n StopAsyncIteration = StopIteration\n\n\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ndef is_unhandled_exception(container_obj, py_db, frame, int last_raise_line, set raise_lines):\n# ELSE\n# def is_unhandled_exception(container_obj, py_db, frame, last_raise_line, raise_lines):\n # ENDIF\n if frame.f_lineno in raise_lines:\n return True\n\n else:\n try_except_infos = container_obj.try_except_infos\n if try_except_infos is None:\n container_obj.try_except_infos = try_except_infos = py_db.collect_try_except_info(frame.f_code)\n\n if not try_except_infos:\n # Consider the last exception as unhandled because there's no try..except in it.\n return True\n else:\n # Now, consider only the try..except for the raise\n valid_try_except_infos = []\n for try_except_info in try_except_infos:\n if try_except_info.is_line_in_try_block(last_raise_line):\n valid_try_except_infos.append(try_except_info)\n\n if not valid_try_except_infos:\n return True\n\n else:\n # Note: check all, not only the "valid" ones to cover the case\n # in "tests_python.test_tracing_on_top_level.raise_unhandled10"\n # where one try..except is inside the other with only a raise\n # and it's gotten in the except line.\n for try_except_info in try_except_infos:\n if try_except_info.is_line_in_except_block(frame.f_lineno):\n if frame.f_lineno == try_except_info.except_line or frame.f_lineno in try_except_info.raise_lines_in_except:\n # In a raise inside a try..except block or some except which 
doesn't\n # match the raised exception.\n return True\n return False\n\n\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef class _TryExceptContainerObj:\n cdef public list try_except_infos;\n def __init__(self):\n self.try_except_infos = None\n# ELSE\n# class _TryExceptContainerObj(object):\n# """\n# A dumb container object just to contain the try..except info when needed. Meant to be\n# persistent among multiple PyDBFrames to the same code object.\n# """\n# \n# try_except_infos = None\n# \n# ENDIF\n\n\n# =======================================================================================================================\n# PyDBFrame\n# =======================================================================================================================\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef class PyDBFrame:\n# ELSE\n# class PyDBFrame:\n# """This makes the tracing for a given frame, so, the trace_dispatch\n# is used initially when we enter into a new context ('call') and then\n# is reused for the entire context.\n# """\n# \n # ENDIF\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef tuple _args\n cdef int should_skip\n cdef object exc_info\n def __init__(self, tuple args):\n self._args = args # In the cython version we don't need to pass the frame\n self.should_skip = -1 # On cythonized version, put in instance.\n self.exc_info = ()\n # ELSE\n# should_skip = -1 # Default value in class (put in instance on set).\n# exc_info = () # Default value in class (put in instance on set).\n# \n# if PYDEVD_USE_SYS_MONITORING:\n# \n# def __init__(self, *args, **kwargs):\n# raise RuntimeError("Not expected to be used in sys.monitoring.")\n# \n# else:\n# \n# def __init__(self, args):\n# # args = py_db, abs_path_canonical_path_and_base, base, info, t, frame\n# # yeap, much faster than putting in self and then getting it from self later on\n# self._args = args\n # ENDIF\n\n def 
set_suspend(self, *args, **kwargs):\n self._args[0].set_suspend(*args, **kwargs)\n\n def do_wait_suspend(self, *args, **kwargs):\n self._args[0].do_wait_suspend(*args, **kwargs)\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n def trace_exception(self, frame, str event, arg):\n cdef bint should_stop;\n cdef tuple exc_info;\n # ELSE\n# def trace_exception(self, frame, event, arg):\n # ENDIF\n if event == "exception":\n should_stop, frame, exc_info = should_stop_on_exception(self._args[0], self._args[2], frame, self._args[3], arg, self.exc_info)\n self.exc_info = exc_info\n\n if should_stop:\n if handle_exception(self._args[0], self._args[3], frame, arg, EXCEPTION_TYPE_HANDLED):\n return self.trace_dispatch\n\n elif event == "return":\n exc_info = self.exc_info\n if exc_info and arg is None:\n frame_skips_cache, frame_cache_key = self._args[4], self._args[5]\n custom_key = (frame_cache_key, "try_exc_info")\n container_obj = frame_skips_cache.get(custom_key)\n if container_obj is None:\n container_obj = frame_skips_cache[custom_key] = _TryExceptContainerObj()\n if is_unhandled_exception(container_obj, self._args[0], frame, exc_info[1], exc_info[2]) and self.handle_user_exception(\n frame\n ):\n return self.trace_dispatch\n\n return self.trace_exception\n\n def handle_user_exception(self, frame):\n exc_info = self.exc_info\n if exc_info:\n return handle_exception(self._args[0], self._args[3], frame, exc_info[0], EXCEPTION_TYPE_USER_UNHANDLED)\n return False\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef get_func_name(self, frame):\n cdef str func_name\n # ELSE\n# def get_func_name(self, frame):\n # ENDIF\n code_obj = frame.f_code\n func_name = code_obj.co_name\n try:\n cls_name = get_clsname_for_code(code_obj, frame)\n if cls_name is not None:\n return "%s.%s" % (cls_name, func_name)\n else:\n return func_name\n except:\n pydev_log.exception()\n return func_name\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it 
is automatically generated)\n cdef _show_return_values(self, frame, arg):\n # ELSE\n# def _show_return_values(self, frame, arg):\n # ENDIF\n try:\n try:\n f_locals_back = getattr(frame.f_back, "f_locals", None)\n if f_locals_back is not None:\n return_values_dict = f_locals_back.get(RETURN_VALUES_DICT, None)\n if return_values_dict is None:\n return_values_dict = {}\n f_locals_back[RETURN_VALUES_DICT] = return_values_dict\n name = self.get_func_name(frame)\n return_values_dict[name] = arg\n except:\n pydev_log.exception()\n finally:\n f_locals_back = None\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef _remove_return_values(self, py_db, frame):\n # ELSE\n# def _remove_return_values(self, py_db, frame):\n # ENDIF\n try:\n try:\n # Showing return values was turned off, we should remove them from locals dict.\n # The values can be in the current frame or in the back one\n frame.f_locals.pop(RETURN_VALUES_DICT, None)\n\n f_locals_back = getattr(frame.f_back, "f_locals", None)\n if f_locals_back is not None:\n f_locals_back.pop(RETURN_VALUES_DICT, None)\n except:\n pydev_log.exception()\n finally:\n f_locals_back = None\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef _get_unfiltered_back_frame(self, py_db, frame):\n # ELSE\n# def _get_unfiltered_back_frame(self, py_db, frame):\n # ENDIF\n f = frame.f_back\n while f is not None:\n if not py_db.is_files_filter_enabled:\n return f\n\n else:\n if py_db.apply_files_filter(f, f.f_code.co_filename, False):\n f = f.f_back\n\n else:\n return f\n\n return f\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef _is_same_frame(self, target_frame, current_frame):\n cdef PyDBAdditionalThreadInfo info;\n # ELSE\n# def _is_same_frame(self, target_frame, current_frame):\n # ENDIF\n if target_frame is current_frame:\n return True\n\n info = self._args[2]\n if info.pydev_use_scoped_step_frame:\n # If using scoped step we don't check the target, we 
just need to check\n # if the current matches the same heuristic where the target was defined.\n if target_frame is not None and current_frame is not None:\n if target_frame.f_code.co_filename == current_frame.f_code.co_filename:\n # The co_name may be different (it may include the line number), but\n # the filename must still be the same.\n f = current_frame.f_back\n if f is not None and f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[1]:\n f = f.f_back\n if f is not None and f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[2]:\n return True\n\n return False\n\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cpdef trace_dispatch(self, frame, str event, arg):\n cdef tuple abs_path_canonical_path_and_base;\n cdef bint is_exception_event;\n cdef bint has_exception_breakpoints;\n cdef bint can_skip;\n cdef bint stop;\n cdef bint stop_on_plugin_breakpoint;\n cdef PyDBAdditionalThreadInfo info;\n cdef int step_cmd;\n cdef int line;\n cdef bint is_line;\n cdef bint is_call;\n cdef bint is_return;\n cdef bint should_stop;\n cdef dict breakpoints_for_file;\n cdef dict stop_info;\n cdef str curr_func_name;\n cdef dict frame_skips_cache;\n cdef object frame_cache_key;\n cdef tuple line_cache_key;\n cdef int breakpoints_in_line_cache;\n cdef int breakpoints_in_frame_cache;\n cdef bint has_breakpoint_in_frame;\n cdef bint is_coroutine_or_generator;\n cdef int bp_line;\n cdef object bp;\n cdef int pydev_smart_parent_offset\n cdef int pydev_smart_child_offset\n cdef tuple pydev_smart_step_into_variants\n # ELSE\n# def trace_dispatch(self, frame, event, arg):\n # ENDIF\n # Note: this is a big function because most of the logic related to hitting a breakpoint and\n # stepping is contained in it. Ideally this could be split among multiple functions, but the\n # problem in this case is that in pure-python function calls are expensive and even more so\n # when tracing is on (because each function call will get an additional tracing call). 
We\n # try to address this by using the info.is_tracing for the fastest possible return, but the\n # cost is still high (maybe we could use code-generation in the future and make the code\n # generation be better split among what each part does).\n\n try:\n # DEBUG = '_debugger_case_yield_from.py' in frame.f_code.co_filename\n py_db, abs_path_canonical_path_and_base, info, thread, frame_skips_cache, frame_cache_key = self._args\n # if DEBUG: print('frame trace_dispatch %s %s %s %s %s %s, stop: %s' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, constant_to_str(info.pydev_step_cmd), arg, info.pydev_step_stop))\n info.is_tracing += 1\n\n # TODO: This shouldn't be needed. The fact that frame.f_lineno\n # is None seems like a bug in Python 3.11.\n # Reported in: https://github.com/python/cpython/issues/94485\n line = frame.f_lineno or 0 # Workaround or case where frame.f_lineno is None\n line_cache_key = (frame_cache_key, line)\n\n if py_db.pydb_disposed:\n return None if event == "call" else NO_FTRACE\n\n plugin_manager = py_db.plugin\n has_exception_breakpoints = (\n py_db.break_on_caught_exceptions or py_db.break_on_user_uncaught_exceptions or py_db.has_plugin_exception_breaks\n )\n\n stop_frame = info.pydev_step_stop\n step_cmd = info.pydev_step_cmd\n function_breakpoint_on_call_event = None\n\n if frame.f_code.co_flags & 0xA0: # 0xa0 == CO_GENERATOR = 0x20 | CO_COROUTINE = 0x80\n # Dealing with coroutines and generators:\n # When in a coroutine we change the perceived event to the debugger because\n # a call, StopIteration exception and return are usually just pausing/unpausing it.\n if event == "line":\n is_line = True\n is_call = False\n is_return = False\n is_exception_event = False\n\n elif event == "return":\n is_line = False\n is_call = False\n is_return = True\n is_exception_event = False\n\n returns_cache_key = (frame_cache_key, "returns")\n return_lines = frame_skips_cache.get(returns_cache_key)\n if return_lines is None:\n # 
Note: we're collecting the return lines by inspecting the bytecode as\n # there are multiple returns and multiple stop iterations when awaiting and\n # it doesn't give any clear indication when a coroutine or generator is\n # finishing or just pausing.\n return_lines = set()\n for x in py_db.collect_return_info(frame.f_code):\n # Note: cython does not support closures in cpdefs (so we can't use\n # a list comprehension).\n return_lines.add(x.return_line)\n\n frame_skips_cache[returns_cache_key] = return_lines\n\n if line not in return_lines:\n # Not really a return (coroutine/generator paused).\n return self.trace_dispatch\n else:\n if self.exc_info:\n self.handle_user_exception(frame)\n return self.trace_dispatch\n\n # Tricky handling: usually when we're on a frame which is about to exit\n # we set the step mode to step into, but in this case we'd end up in the\n # asyncio internal machinery, which is not what we want, so, we just\n # ask the stop frame to be a level up.\n #\n # Note that there's an issue here which we may want to fix in the future: if\n # the back frame is a frame which is filtered, we won't stop properly.\n # Solving this may not be trivial as we'd need to put a scope in the step\n # in, but we may have to do it anyways to have a step in which doesn't end\n # up in asyncio).\n #\n # Note2: we don't revert to a step in if we're doing scoped stepping\n # (because on scoped stepping we're always receiving a call/line/return\n # event for each line in ipython, so, we can't revert to step in on return\n # as the return shouldn't mean that we've actually completed executing a\n # frame in this case).\n if stop_frame is frame and not info.pydev_use_scoped_step_frame:\n if step_cmd in (108, 159, 107, 144):\n f = self._get_unfiltered_back_frame(py_db, frame)\n if f is not None:\n info.pydev_step_cmd = 206\n info.pydev_step_stop = f\n else:\n if step_cmd == 108:\n info.pydev_step_cmd = 107\n info.pydev_step_stop = None\n\n elif step_cmd == 159:\n 
info.pydev_step_cmd = 144\n info.pydev_step_stop = None\n\n elif step_cmd == 206:\n # We're exiting this one, so, mark the new coroutine context.\n f = self._get_unfiltered_back_frame(py_db, frame)\n if f is not None:\n info.pydev_step_stop = f\n else:\n info.pydev_step_cmd = 107\n info.pydev_step_stop = None\n\n elif event == "exception":\n breakpoints_for_file = None\n if has_exception_breakpoints:\n should_stop, frame, exc_info = should_stop_on_exception(\n self._args[0], self._args[2], frame, self._args[3], arg, self.exc_info\n )\n self.exc_info = exc_info\n if should_stop:\n if handle_exception(self._args[0], self._args[3], frame, arg, EXCEPTION_TYPE_HANDLED):\n return self.trace_dispatch\n\n return self.trace_dispatch\n else:\n # event == 'call' or event == 'c_XXX'\n return self.trace_dispatch\n\n else: # Not coroutine nor generator\n if event == "line":\n is_line = True\n is_call = False\n is_return = False\n is_exception_event = False\n\n elif event == "return":\n is_line = False\n is_return = True\n is_call = False\n is_exception_event = False\n\n # If we are in single step mode and something causes us to exit the current frame, we need to make sure we break\n # eventually. 
Force the step mode to step into and the step stop frame to None.\n # I.e.: F6 in the end of a function should stop in the next possible position (instead of forcing the user\n # to make a step in or step over at that location).\n # Note: this is especially troublesome when we're skipping code with the\n # @DontTrace comment.\n if (\n stop_frame is frame\n and not info.pydev_use_scoped_step_frame\n and is_return\n and step_cmd\n in (108, 109, 159, 160, 128)\n ):\n if step_cmd in (108, 109, 128):\n info.pydev_step_cmd = 107\n else:\n info.pydev_step_cmd = 144\n info.pydev_step_stop = None\n\n if self.exc_info:\n if self.handle_user_exception(frame):\n return self.trace_dispatch\n\n elif event == "call":\n is_line = False\n is_call = True\n is_return = False\n is_exception_event = False\n if frame.f_code.co_firstlineno == frame.f_lineno: # Check line to deal with async/await.\n function_breakpoint_on_call_event = py_db.function_breakpoint_name_to_breakpoint.get(frame.f_code.co_name)\n\n elif event == "exception":\n is_exception_event = True\n breakpoints_for_file = None\n if has_exception_breakpoints:\n should_stop, frame, exc_info = should_stop_on_exception(\n self._args[0], self._args[2], frame, self._args[3], arg, self.exc_info\n )\n self.exc_info = exc_info\n if should_stop:\n if handle_exception(self._args[0], self._args[3], frame, arg, EXCEPTION_TYPE_HANDLED):\n return self.trace_dispatch\n is_line = False\n is_return = False\n is_call = False\n\n else:\n # Unexpected: just keep the same trace func (i.e.: event == 'c_XXX').\n return self.trace_dispatch\n\n if not is_exception_event:\n breakpoints_for_file = py_db.breakpoints.get(abs_path_canonical_path_and_base[1])\n\n can_skip = False\n\n if info.pydev_state == 1: # 1 = 1\n # we can skip if:\n # - we have no stop marked\n # - we should make a step return/step over and we're not in the current frame\n # - we're stepping into a coroutine context and we're not in that context\n if step_cmd == -1:\n can_skip = 
True\n\n elif step_cmd in (\n 108,\n 109,\n 159,\n 160,\n ) and not self._is_same_frame(stop_frame, frame):\n can_skip = True\n\n elif step_cmd == 128 and (\n stop_frame is not None\n and stop_frame is not frame\n and stop_frame is not frame.f_back\n and (frame.f_back is None or stop_frame is not frame.f_back.f_back)\n ):\n can_skip = True\n\n elif step_cmd == 144:\n if py_db.apply_files_filter(frame, frame.f_code.co_filename, True) and (\n frame.f_back is None or py_db.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True)\n ):\n can_skip = True\n\n elif step_cmd == 206:\n f = frame\n while f is not None:\n if self._is_same_frame(stop_frame, f):\n break\n f = f.f_back\n else:\n can_skip = True\n\n if can_skip:\n if plugin_manager is not None and (py_db.has_plugin_line_breaks or py_db.has_plugin_exception_breaks):\n can_skip = plugin_manager.can_skip(py_db, frame)\n\n if (\n can_skip\n and py_db.show_return_values\n and info.pydev_step_cmd in (108, 159)\n and self._is_same_frame(stop_frame, frame.f_back)\n ):\n # trace function for showing return values after step over\n can_skip = False\n\n # Let's check to see if we are in a function that has a breakpoint. 
If we don't have a breakpoint,\n # we will return nothing for the next trace\n # also, after we hit a breakpoint and go to some other debugging state, we have to force the set trace anyway,\n # so, that's why the additional checks are there.\n\n if function_breakpoint_on_call_event:\n pass # Do nothing here (just keep on going as we can't skip it).\n\n elif not breakpoints_for_file:\n if can_skip:\n if has_exception_breakpoints:\n return self.trace_exception\n else:\n return None if is_call else NO_FTRACE\n\n else:\n # When cached, 0 means we don't have a breakpoint and 1 means we have.\n if can_skip:\n breakpoints_in_line_cache = frame_skips_cache.get(line_cache_key, -1)\n if breakpoints_in_line_cache == 0:\n return self.trace_dispatch\n\n breakpoints_in_frame_cache = frame_skips_cache.get(frame_cache_key, -1)\n if breakpoints_in_frame_cache != -1:\n # Gotten from cache.\n has_breakpoint_in_frame = breakpoints_in_frame_cache == 1\n\n else:\n has_breakpoint_in_frame = False\n\n try:\n func_lines = set()\n for offset_and_lineno in dis.findlinestarts(frame.f_code):\n if offset_and_lineno[1] is not None:\n func_lines.add(offset_and_lineno[1])\n except:\n # This is a fallback for implementations where we can't get the function\n # lines -- i.e.: jython (in this case clients need to provide the function\n # name to decide on the skip or we won't be able to skip the function\n # completely).\n\n # Checks the breakpoint to see if there is a context match in some function.\n curr_func_name = frame.f_code.co_name\n\n # global context is set with an empty name\n if curr_func_name in ("?", "<module>", "<lambda>"):\n curr_func_name = ""\n\n for bp in breakpoints_for_file.values():\n # will match either global or some function\n if bp.func_name in ("None", curr_func_name):\n has_breakpoint_in_frame = True\n break\n else:\n for bp_line in breakpoints_for_file: # iterate on keys\n if bp_line in func_lines:\n has_breakpoint_in_frame = True\n break\n\n # Cache the value (1 or 0 or 
-1 for default because of cython).\n if has_breakpoint_in_frame:\n frame_skips_cache[frame_cache_key] = 1\n else:\n frame_skips_cache[frame_cache_key] = 0\n\n if can_skip and not has_breakpoint_in_frame:\n if has_exception_breakpoints:\n return self.trace_exception\n else:\n return None if is_call else NO_FTRACE\n\n # We may have hit a breakpoint or we are already in step mode. Either way, let's check what we should do in this frame\n # if DEBUG: print('NOT skipped: %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, event, frame.__class__.__name__))\n\n try:\n stop_on_plugin_breakpoint = False\n # return is not taken into account for breakpoint hit because we'd have a double-hit in this case\n # (one for the line and the other for the return).\n\n stop_info = {}\n breakpoint = None\n stop = False\n stop_reason = 111\n bp_type = None\n\n if function_breakpoint_on_call_event:\n breakpoint = function_breakpoint_on_call_event\n stop = True\n new_frame = frame\n stop_reason = CMD_SET_FUNCTION_BREAK\n\n elif is_line and info.pydev_state != 2 and breakpoints_for_file is not None and line in breakpoints_for_file:\n breakpoint = breakpoints_for_file[line]\n new_frame = frame\n stop = True\n\n elif plugin_manager is not None and py_db.has_plugin_line_breaks:\n result = plugin_manager.get_breakpoint(py_db, frame, event, self._args[2])\n if result:\n stop_on_plugin_breakpoint = True\n breakpoint, new_frame, bp_type = result\n\n if breakpoint:\n # ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint\n # lets do the conditional stuff here\n if breakpoint.expression is not None:\n py_db.handle_breakpoint_expression(breakpoint, info, new_frame)\n\n if stop or stop_on_plugin_breakpoint:\n eval_result = False\n if breakpoint.has_condition:\n eval_result = py_db.handle_breakpoint_condition(info, breakpoint, new_frame)\n if not eval_result:\n stop = False\n stop_on_plugin_breakpoint = False\n\n if is_call and (\n frame.f_code.co_name in ("<lambda>", 
"<module>") or (line == 1 and frame.f_code.co_name.startswith("<cell"))\n ):\n # If we find a call for a module, it means that the module is being imported/executed for the\n # first time. In this case we have to ignore this hit as it may later duplicated by a\n # line event at the same place (so, if there's a module with a print() in the first line\n # the user will hit that line twice, which is not what we want).\n #\n # For lambda, as it only has a single statement, it's not interesting to trace\n # its call and later its line event as they're usually in the same line.\n #\n # For ipython, <cell xxx> may be executed having each line compiled as a new\n # module, so it's the same case as <module>.\n\n return self.trace_dispatch\n\n # Handle logpoint (on a logpoint we should never stop).\n if (stop or stop_on_plugin_breakpoint) and breakpoint.is_logpoint:\n stop = False\n stop_on_plugin_breakpoint = False\n\n if info.pydev_message is not None and len(info.pydev_message) > 0:\n cmd = py_db.cmd_factory.make_io_message(info.pydev_message + os.linesep, "1")\n py_db.writer.add_command(cmd)\n\n if py_db.show_return_values:\n if is_return and (\n (\n info.pydev_step_cmd in (108, 159, 128)\n and (self._is_same_frame(stop_frame, frame.f_back))\n )\n or (info.pydev_step_cmd in (109, 160) and (self._is_same_frame(stop_frame, frame)))\n or (info.pydev_step_cmd in (107, 206))\n or (\n info.pydev_step_cmd == 144\n and frame.f_back is not None\n and not py_db.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True)\n )\n ):\n self._show_return_values(frame, arg)\n\n elif py_db.remove_return_values_flag:\n try:\n self._remove_return_values(py_db, frame)\n finally:\n py_db.remove_return_values_flag = False\n\n if stop:\n self.set_suspend(\n thread,\n stop_reason,\n suspend_other_threads=breakpoint and breakpoint.suspend_policy == "ALL",\n )\n\n elif stop_on_plugin_breakpoint and plugin_manager is not None:\n result = plugin_manager.suspend(py_db, thread, frame, 
bp_type)\n if result:\n frame = result\n\n # if thread has a suspend flag, we suspend with a busy wait\n if info.pydev_state == 2:\n self.do_wait_suspend(thread, frame, event, arg)\n return self.trace_dispatch\n else:\n if not breakpoint and is_line:\n # No stop from anyone and no breakpoint found in line (cache that).\n frame_skips_cache[line_cache_key] = 0\n\n except:\n # Unfortunately Python itself stops the tracing when it originates from\n # the tracing function, so, we can't do much about it (just let the user know).\n exc = sys.exc_info()[0]\n cmd = py_db.cmd_factory.make_console_message(\n "%s raised from within the callback set in sys.settrace.\nDebugging will be disabled for this thread (%s).\n"\n % (\n exc,\n thread,\n )\n )\n py_db.writer.add_command(cmd)\n if not issubclass(exc, (KeyboardInterrupt, SystemExit)):\n pydev_log.exception()\n\n raise\n\n # step handling. We stop when we hit the right frame\n try:\n should_skip = 0\n if pydevd_dont_trace.should_trace_hook is not None:\n if self.should_skip == -1:\n # I.e.: cache the result on self.should_skip (no need to evaluate the same frame multiple times).\n # Note that on a code reload, we won't re-evaluate this because in practice, the frame.f_code\n # Which will be handled by this frame is read-only, so, we can cache it safely.\n if not pydevd_dont_trace.should_trace_hook(frame.f_code, abs_path_canonical_path_and_base[0]):\n # -1, 0, 1 to be Cython-friendly\n should_skip = self.should_skip = 1\n else:\n should_skip = self.should_skip = 0\n else:\n should_skip = self.should_skip\n\n plugin_stop = False\n if should_skip:\n stop = False\n\n elif step_cmd in (107, 144, 206):\n force_check_project_scope = step_cmd == 144\n if is_line:\n if not info.pydev_use_scoped_step_frame:\n if force_check_project_scope or py_db.is_files_filter_enabled:\n stop = not py_db.apply_files_filter(frame, frame.f_code.co_filename, force_check_project_scope)\n else:\n stop = True\n else:\n if force_check_project_scope or 
py_db.is_files_filter_enabled:\n # Make sure we check the filtering inside ipython calls too...\n if not not py_db.apply_files_filter(frame, frame.f_code.co_filename, force_check_project_scope):\n return None if is_call else NO_FTRACE\n\n # We can only stop inside the ipython call.\n filename = frame.f_code.co_filename\n if filename.endswith(".pyc"):\n filename = filename[:-1]\n\n if not filename.endswith(PYDEVD_IPYTHON_CONTEXT[0]):\n f = frame.f_back\n while f is not None:\n if f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[1]:\n f2 = f.f_back\n if f2 is not None and f2.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[2]:\n pydev_log.debug("Stop inside ipython call")\n stop = True\n break\n f = f.f_back\n\n del f\n\n if not stop:\n # In scoped mode if step in didn't work in this context it won't work\n # afterwards anyways.\n return None if is_call else NO_FTRACE\n\n elif is_return and frame.f_back is not None and not info.pydev_use_scoped_step_frame:\n if py_db.get_file_type(frame.f_back) == py_db.PYDEV_FILE:\n stop = False\n else:\n if force_check_project_scope or py_db.is_files_filter_enabled:\n stop = not py_db.apply_files_filter(\n frame.f_back, frame.f_back.f_code.co_filename, force_check_project_scope\n )\n if stop:\n # Prevent stopping in a return to the same location we were initially\n # (i.e.: double-stop at the same place due to some filtering).\n if info.step_in_initial_location == (frame.f_back, frame.f_back.f_lineno):\n stop = False\n else:\n stop = True\n else:\n stop = False\n\n if stop:\n if step_cmd == 206:\n # i.e.: Check if we're stepping into the proper context.\n f = frame\n while f is not None:\n if self._is_same_frame(stop_frame, f):\n break\n f = f.f_back\n else:\n stop = False\n\n if plugin_manager is not None:\n result = plugin_manager.cmd_step_into(py_db, frame, event, self._args[2], self._args[3], stop_info, stop)\n if result:\n stop, plugin_stop = result\n\n elif step_cmd in (108, 159):\n # Note: when dealing with a step over my code it's the 
same as a step over (the\n # difference is that when we return from a frame in one we go to regular step\n # into and in the other we go to a step into my code).\n stop = self._is_same_frame(stop_frame, frame) and is_line\n # Note: don't stop on a return for step over, only for line events\n # i.e.: don't stop in: (stop_frame is frame.f_back and is_return) as we'd stop twice in that line.\n\n if plugin_manager is not None:\n result = plugin_manager.cmd_step_over(py_db, frame, event, self._args[2], self._args[3], stop_info, stop)\n if result:\n stop, plugin_stop = result\n\n elif step_cmd == 128:\n stop = False\n back = frame.f_back\n if self._is_same_frame(stop_frame, frame) and is_return:\n # We're exiting the smart step into initial frame (so, we probably didn't find our target).\n stop = True\n\n elif self._is_same_frame(stop_frame, back) and is_line:\n if info.pydev_smart_child_offset != -1:\n # i.e.: in this case, we're not interested in the pause in the parent, rather\n # we're interested in the pause in the child (when the parent is at the proper place).\n stop = False\n\n else:\n pydev_smart_parent_offset = info.pydev_smart_parent_offset\n\n pydev_smart_step_into_variants = info.pydev_smart_step_into_variants\n if pydev_smart_parent_offset >= 0 and pydev_smart_step_into_variants:\n # Preferred mode (when the smart step into variants are available\n # and the offset is set).\n stop = get_smart_step_into_variant_from_frame_offset(\n back.f_lasti, pydev_smart_step_into_variants\n ) is get_smart_step_into_variant_from_frame_offset(\n pydev_smart_parent_offset, pydev_smart_step_into_variants\n )\n\n else:\n # Only the name/line is available, so, check that.\n curr_func_name = frame.f_code.co_name\n\n # global context is set with an empty name\n if curr_func_name in ("?", "<module>") or curr_func_name is None:\n curr_func_name = ""\n if curr_func_name == info.pydev_func_name and stop_frame.f_lineno == info.pydev_next_line:\n stop = True\n\n if not stop:\n # In 
smart step into, if we didn't hit it in this frame once, that'll
                        # not be the case next time either, so, disable tracing for this frame.
                        return None if is_call else NO_FTRACE

                elif back is not None and self._is_same_frame(stop_frame, back.f_back) and is_line:
                    # Ok, we have to track 2 stops at this point, the parent and the child offset.
                    # This happens when handling a step into which targets a function inside a list comprehension
                    # or generator (in which case an intermediary frame is created due to an internal function call).
                    pydev_smart_parent_offset = info.pydev_smart_parent_offset
                    pydev_smart_child_offset = info.pydev_smart_child_offset
                    # print('matched back frame', pydev_smart_parent_offset, pydev_smart_child_offset)
                    # print('parent f_lasti', back.f_back.f_lasti)
                    # print('child f_lasti', back.f_lasti)
                    stop = False
                    # NOTE(review): the condition below checks 'pydev_smart_child_offset >= 0'
                    # twice; the first operand was presumably meant to be
                    # 'pydev_smart_parent_offset >= 0'. Behavior is unaffected because the
                    # parent offset is re-checked just below, but this should be fixed in the
                    # template this file is generated from (pydevd_frame.py), not here.
                    if pydev_smart_child_offset >= 0 and pydev_smart_child_offset >= 0:
                        pydev_smart_step_into_variants = info.pydev_smart_step_into_variants

                        if pydev_smart_parent_offset >= 0 and pydev_smart_step_into_variants:
                            # Note that we don't really check the parent offset, only the offset of
                            # the child (because this is a generator, the parent may have moved forward
                            # already -- and that's ok, so, we just check that the parent frame
                            # matches in this case).
                            smart_step_into_variant = get_smart_step_into_variant_from_frame_offset(
                                pydev_smart_parent_offset, pydev_smart_step_into_variants
                            )
                            # print('matched parent offset', pydev_smart_parent_offset)
                            # Ok, now, check the child variant
                            children_variants = smart_step_into_variant.children_variants
                            stop = children_variants and (
                                get_smart_step_into_variant_from_frame_offset(back.f_lasti, children_variants)
                                is get_smart_step_into_variant_from_frame_offset(pydev_smart_child_offset, children_variants)
                            )
                            # print('stop at child', stop)

                    if not stop:
                        # In smart step into, if we didn't hit it in this frame once, that'll
                        # not be the case next time either, so, 
disable tracing for this frame.\n return None if is_call else NO_FTRACE\n\n elif step_cmd in (109, 160):\n stop = is_return and self._is_same_frame(stop_frame, frame)\n\n else:\n stop = False\n\n if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"):\n f_code = getattr(frame.f_back, "f_code", None)\n if f_code is not None:\n if py_db.get_file_type(frame.f_back) == py_db.PYDEV_FILE:\n stop = False\n\n if plugin_stop:\n plugin_manager.stop(py_db, frame, event, self._args[3], stop_info, arg, step_cmd)\n elif stop:\n if is_line:\n self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd)\n self.do_wait_suspend(thread, frame, event, arg)\n elif is_return: # return event\n back = frame.f_back\n if back is not None:\n # When we get to the pydevd run function, the debugging has actually finished for the main thread\n # (note that it can still go on for other threads, but for this one, we just make it finish)\n # So, just setting it to None should be OK\n back_absolute_filename, _, base = get_abs_path_real_path_and_base_from_frame(back)\n if (base, back.f_code.co_name) in (DEBUG_START, DEBUG_START_PY3K):\n back = None\n\n elif base == TRACE_PROPERTY:\n # We dont want to trace the return event of pydevd_traceproperty (custom property for debugging)\n # if we're in a return, we want it to appear to the user in the previous frame!\n return None if is_call else NO_FTRACE\n\n elif pydevd_dont_trace.should_trace_hook is not None:\n if not pydevd_dont_trace.should_trace_hook(back.f_code, back_absolute_filename):\n # In this case, we'll have to skip the previous one because it shouldn't be traced.\n # Also, we have to reset the tracing, because if the parent's parent (or some\n # other parent) has to be traced and it's not currently, we wouldn't stop where\n # we should anymore (so, a step in/over/return may not stop anywhere if no parent is traced).\n # Related test: _debugger_case17a.py\n py_db.set_trace_for_frame_and_parents(thread.ident, 
back)
                                    return None if is_call else NO_FTRACE

                        if back is not None:
                            # if we're in a return, we want it to appear to the user in the previous frame!
                            self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd)
                            self.do_wait_suspend(thread, back, event, arg)
                        else:
                            # in jython we may not have a back frame
                            info.pydev_step_stop = None
                            info.pydev_original_step_cmd = -1
                            info.pydev_step_cmd = -1
                            info.pydev_state = 1
                            info.update_stepping_info()

                # if we are quitting, let's stop the tracing
                if py_db.quitting:
                    return None if is_call else NO_FTRACE

                return self.trace_dispatch
            except:
                # Unfortunately Python itself stops the tracing when it originates from
                # the tracing function, so, we can't do much about it (just let the user know).
                exc = sys.exc_info()[0]
                cmd = py_db.cmd_factory.make_console_message(
                    "%s raised from within the callback set in sys.settrace.\nDebugging will be disabled for this thread (%s).\n"
                    % (
                        exc,
                        thread,
                    )
                )
                py_db.writer.add_command(cmd)
                if not issubclass(exc, (KeyboardInterrupt, SystemExit)):
                    pydev_log.exception()
                raise

            finally:
                info.is_tracing -= 1

        # end trace_dispatch


# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
def should_stop_on_exception(py_db, PyDBAdditionalThreadInfo info, frame, thread, arg, prev_user_uncaught_exc_info, is_unwind=False):
    cdef bint should_stop;
    cdef bint was_just_raised;
    cdef list check_excs;
# ELSE
# def should_stop_on_exception(py_db, info, frame, thread, arg, prev_user_uncaught_exc_info, is_unwind=False):
    # ENDIF

    # Decide whether the debugger should pause for the exception currently being
    # propagated. 'arg' is the (exception, value, trace) tuple from a trace
    # 'exception' event; 'prev_user_uncaught_exc_info' carries the info collected
    # on previous frames for the "user uncaught" mode.
    # Returns (should_stop, frame, maybe_user_uncaught_exc_info) -- 'frame' may be
    # replaced by the plugin's exception_break result.
    should_stop = False
    maybe_user_uncaught_exc_info = prev_user_uncaught_exc_info

    # 2 = 2
    # NOTE(review): '2' is a constant inlined by the generator (presumably the
    # suspended-state constant from pydevd_constants -- confirm); i.e.: skip these
    # checks while the thread is already suspended.
    if info.pydev_state != 2:  # and breakpoint is not None:
        exception, value, trace = arg

        if trace is not None and hasattr(trace, "tb_next"):
            # on jython trace is None on the first event and it may not have a tb_next.

            should_stop = False
            exception_breakpoint = None
            try:
                if py_db.plugin is not None:
                    result = py_db.plugin.exception_break(py_db, frame, thread, arg, is_unwind)
                    if result:
                        should_stop, frame = result
            except:
                pydev_log.exception()

            if not should_stop:
                # Apply checks that don't need the exception breakpoint (where we shouldn't ever stop).
                if exception == SystemExit and py_db.ignore_system_exit_code(value):
                    pass

                elif exception in (GeneratorExit, StopIteration, StopAsyncIteration):
                    # These exceptions are control-flow related (they work as a generator
                    # pause), so, we shouldn't stop on them.
                    pass

                elif ignore_exception_trace(trace):
                    pass

                else:
                    was_just_raised = trace.tb_next is None

                    # It was not handled by any plugin, lets check exception breakpoints.
                    check_excs = []

                    # Note: check user unhandled before regular exceptions.
                    exc_break_user = py_db.get_exception_breakpoint(exception, py_db.break_on_user_uncaught_exceptions)
                    if exc_break_user is not None:
                        check_excs.append((exc_break_user, True))

                    exc_break_caught = py_db.get_exception_breakpoint(exception, py_db.break_on_caught_exceptions)
                    if exc_break_caught is not None:
                        check_excs.append((exc_break_caught, False))

                    for exc_break, is_user_uncaught in check_excs:
                        # Initially mark that it should stop and then go into exclusions.
                        should_stop = True

                        if py_db.exclude_exception_by_filter(exc_break, trace):
                            pydev_log.debug(
                                "Ignore exception %s in library %s -- (%s)" % (exception, frame.f_code.co_filename, frame.f_code.co_name)
                            )
                            should_stop = False

                        elif exc_break.condition is not None and not py_db.handle_breakpoint_condition(info, exc_break, frame):
                            should_stop = False

                        elif is_user_uncaught:
                            # Note: we don't stop here, we just collect the exc_info to use later on...
                            should_stop = False
                            if not py_db.apply_files_filter(frame, frame.f_code.co_filename, True) and (
                                frame.f_back is None or py_db.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True)
                            ):
                                # User uncaught means that we're currently in user code but the code
                                # up the stack is library code.
                                exc_info = prev_user_uncaught_exc_info
                                if not exc_info:
                                    exc_info = (arg, frame.f_lineno, set([frame.f_lineno]))
                                else:
                                    lines = exc_info[2]
                                    lines.add(frame.f_lineno)
                                    exc_info = (arg, frame.f_lineno, lines)
                                maybe_user_uncaught_exc_info = exc_info
                        else:
                            # I.e.: these are only checked if we're not dealing with user uncaught exceptions.
                            if (
                                exc_break.notify_on_first_raise_only
                                and py_db.skip_on_exceptions_thrown_in_same_context
                                and not was_just_raised
                                and not just_raised(trace.tb_next)
                            ):
                                # In this case we never stop if it was just raised, so, to know if it was the first we
                                # need to check if we're in the 2nd method.
                                should_stop = False  # I.e.: we stop only when we're at the caller of a method that throws an exception

                            elif (
                                exc_break.notify_on_first_raise_only
                                and not py_db.skip_on_exceptions_thrown_in_same_context
                                and not was_just_raised
                            ):
                                should_stop = False  # I.e.: we stop only when it was just raised

                            elif was_just_raised and py_db.skip_on_exceptions_thrown_in_same_context:
                                # Option: Don't break if an exception is caught in the same function from which it is thrown
                                should_stop = False

                        if should_stop:
                            exception_breakpoint = exc_break
                            try:
                                info.pydev_message = exc_break.qname
                            except:
                                info.pydev_message = exc_break.qname.encode("utf-8")
                            break

            if should_stop:
                # Always add exception to frame (must remove later after we proceed).
                add_exception_to_frame(frame, (exception, value, trace))

                if exception_breakpoint is not None and exception_breakpoint.expression is not None:
                    py_db.handle_breakpoint_expression(exception_breakpoint, info, frame)

    return should_stop, frame, maybe_user_uncaught_exc_info


# Same thing in the main debugger but only considering the file contents, while the one in the main debugger
# considers the user input (so, the actual result must be a join of 
both).
filename_to_lines_where_exceptions_are_ignored: dict = {}
filename_to_stat_info: dict = {}


# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
def handle_exception(py_db, thread, frame, arg, str exception_type):
    cdef bint stopped;
    cdef tuple abs_real_path_and_base;
    cdef str absolute_filename;
    cdef str canonical_normalized_filename;
    cdef dict lines_ignored;
    cdef dict frame_id_to_frame;
    cdef dict merged;
    cdef object trace_obj;
# ELSE
# def handle_exception(py_db, thread, frame, arg, exception_type):
    # ENDIF
    # Suspend the thread for a caught exception unless the raising line is marked to
    # be ignored (lines matching IGNORE_EXCEPTION_TAG, cached per file/stat info).
    # Returns True if the thread was actually suspended here.
    stopped = False
    try:
        # print('handle_exception', frame.f_lineno, frame.f_code.co_name)

        # We have 3 things in arg: exception type, description, traceback object
        trace_obj = arg[2]

        initial_trace_obj = trace_obj
        if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
            # I.e.: tb_next should be only None in the context it was thrown (trace_obj.tb_frame is frame is just a double check).
            pass
        else:
            # Get the trace_obj from where the exception was raised...
            while trace_obj.tb_next is not None:
                trace_obj = trace_obj.tb_next

        if py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception:
            for check_trace_obj in (initial_trace_obj, trace_obj):
                abs_real_path_and_base = get_abs_path_real_path_and_base_from_frame(check_trace_obj.tb_frame)
                absolute_filename = abs_real_path_and_base[0]
                canonical_normalized_filename = abs_real_path_and_base[1]

                lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename)
                if lines_ignored is None:
                    lines_ignored = filename_to_lines_where_exceptions_are_ignored[canonical_normalized_filename] = {}

                try:
                    curr_stat = os.stat(absolute_filename)
                    curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
                except:
                    curr_stat = None

                # Invalidate the per-file cache if the file changed on disk (size/mtime).
                last_stat = filename_to_stat_info.get(absolute_filename)
                if last_stat != curr_stat:
                    filename_to_stat_info[absolute_filename] = curr_stat
                    lines_ignored.clear()
                    try:
                        linecache.checkcache(absolute_filename)
                    except:
                        pydev_log.exception("Error in linecache.checkcache(%r)", absolute_filename)

                from_user_input = py_db.filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename)
                if from_user_input:
                    merged = {}
                    merged.update(lines_ignored)
                    # Override what we have with the related entries that the user entered
                    merged.update(from_user_input)
                else:
                    merged = lines_ignored

                exc_lineno = check_trace_obj.tb_lineno

                # print ('lines ignored', lines_ignored)
                # print ('user input', from_user_input)
                # print ('merged', merged, 'curr', exc_lineno)

                if exc_lineno not in merged:  # Note: check on merged but update lines_ignored.
                    try:
                        line = linecache.getline(absolute_filename, exc_lineno, check_trace_obj.tb_frame.f_globals)
                    except:
                        pydev_log.exception("Error in linecache.getline(%r, %s, f_globals)", absolute_filename, exc_lineno)
                        line = ""

                    if IGNORE_EXCEPTION_TAG.match(line) is not None:
                        lines_ignored[exc_lineno] = 1
                        return False
                    else:
                        # Put in the cache saying not to ignore
                        lines_ignored[exc_lineno] = 0
                else:
                    # Ok, dict has it already cached, so, let's check it...
                    if merged.get(exc_lineno, 0):
                        return False

        try:
            frame_id_to_frame = {}
            frame_id_to_frame[id(frame)] = frame
            f = trace_obj.tb_frame
            while f is not None:
                frame_id_to_frame[id(f)] = f
                f = f.f_back
            f = None

            stopped = True
            py_db.send_caught_exception_stack(thread, arg, id(frame))
            try:
                # NOTE(review): '137' is a suspend-reason constant inlined by the
                # generator (CMD_* in pydevd_comm_constants upstream -- confirm there).
                py_db.set_suspend(thread, 137)
                py_db.do_wait_suspend(thread, frame, "exception", arg, exception_type=exception_type)
            finally:
                py_db.send_caught_exception_stack_proceeded(thread)
        except:
            pydev_log.exception()

        py_db.set_trace_for_frame_and_parents(thread.ident, frame)
    finally:
        # Make sure the user cannot see the '__exception__' we added after we leave the suspend state.
        remove_exception_from_frame(frame)
        # Clear some local variables...
        frame = None
        trace_obj = None
        initial_trace_obj = None
        check_trace_obj = None
        f = None
        frame_id_to_frame = None
        py_db = None
        thread = None

    return stopped
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_log import exception as pydev_log_exception
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import (
    get_current_thread_id,
    NO_FTRACE,
    USE_CUSTOM_SYS_CURRENT_FRAMES_MAP,
    ForkSafeLock,
    PYDEVD_USE_SYS_MONITORING,
)
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER

# fmt: off
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
from cpython.object cimport PyObject
from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
# from _pydevd_bundle.pydevd_frame import PyDBFrame, is_unhandled_exception
# ENDIF
# fmt: on

# fmt: off
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
cdef dict _global_notify_skipped_step_in
# ELSE
# # Note: those are now inlined on cython.
# 107 = 107
# 144 = 144
# 109 = 109
# 160 = 160
# ENDIF
# fmt: on

# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}

_global_notify_skipped_step_in = False
_global_notify_skipped_step_in_lock = ForkSafeLock()


def notify_skipped_step_in_because_of_filters(py_db, frame):
    # Tell the client (once per session) that a step-in was skipped due to file
    # filtering; the flag is double-checked under the lock.
    global _global_notify_skipped_step_in

    with _global_notify_skipped_step_in_lock:
        if _global_notify_skipped_step_in:
            # Check with lock in place (callers should actually have checked
            # before without the lock in place due to performance).
            return
        _global_notify_skipped_step_in = True
        py_db.notify_skipped_step_in_because_of_filters(frame)


# fmt: off
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is 
automatically generated)
cdef class SafeCallWrapper:
    cdef method_object
    def __init__(self, method_object):
        self.method_object = method_object
    def __call__(self, *args):
        #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
        #in the frame, and that reference might get destroyed by set trace on frame and parents
        cdef PyObject* method_obj = <PyObject*> self.method_object
        Py_INCREF(<object>method_obj)
        ret = (<object>method_obj)(*args)
        Py_XDECREF (method_obj)
        return SafeCallWrapper(ret) if ret is not None else None
    def get_method_object(self):
        return self.method_object
# ELSE
# ENDIF
# fmt: on


def fix_top_level_trace_and_get_trace_func(py_db, frame):
    # fmt: off
    # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
    cdef str filename;
    cdef str name;
    cdef tuple args;
    # ENDIF
    # fmt: on

    # Note: this is always the first entry-point in the tracing for any thread.
    # After entering here we'll set a new tracing function for this thread
    # where more information is cached (and will also setup the tracing for
    # frames where we should deal with unhandled exceptions).
    # Returns (trace_func_or_None, apply_to_settrace): 'trace_dispatch' below
    # installs the returned function via py_db.enable_tracing when the second
    # item is True.
    thread = None
    # Cache the frame which should be traced to deal with unhandled exceptions.
    # (i.e.: thread entry-points).

    f_unhandled = frame
    # print('called at', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
    force_only_unhandled_tracer = False
    while f_unhandled is not None:
        # name = splitext(basename(f_unhandled.f_code.co_filename))[0]

        name = f_unhandled.f_code.co_filename
        # basename
        i = name.rfind("/")
        j = name.rfind("\\")
        if j > i:
            i = j
        if i >= 0:
            name = name[i + 1 :]
        # remove ext
        i = name.rfind(".")
        if i >= 0:
            name = name[:i]

        if name == "threading":
            if f_unhandled.f_code.co_name in ("__bootstrap", "_bootstrap"):
                # We need __bootstrap_inner, not __bootstrap.
                return None, False

            elif f_unhandled.f_code.co_name in ("__bootstrap_inner", "_bootstrap_inner"):
                # Note: be careful not to use threading.currentThread to avoid creating a dummy thread.
                t = f_unhandled.f_locals.get("self")
                force_only_unhandled_tracer = True
                if t is not None and isinstance(t, threading.Thread):
                    thread = t
                    break

        elif name == "pydev_monkey":
            if f_unhandled.f_code.co_name == "__call__":
                force_only_unhandled_tracer = True
                break

        elif name == "pydevd":
            if f_unhandled.f_code.co_name in ("run", "main"):
                # We need to get to _exec
                return None, False

            if f_unhandled.f_code.co_name == "_exec":
                force_only_unhandled_tracer = True
                break

        elif name == "pydevd_tracing":
            return None, False

        elif f_unhandled.f_back is None:
            break

        f_unhandled = f_unhandled.f_back

    if thread is None:
        # Important: don't call threadingCurrentThread if we're in the threading module
        # to avoid creating dummy threads.
        if py_db.threading_get_ident is not None:
            thread = py_db.threading_active.get(py_db.threading_get_ident())
            if thread is None:
                return None, False
        else:
            # Jython does not have threading.get_ident().
            thread = py_db.threading_current_thread()

    if getattr(thread, "pydev_do_not_trace", None):
        py_db.disable_tracing()
        return None, False

    try:
        additional_info = thread.additional_info
        if additional_info is None:
            raise AttributeError()
    except:
        additional_info = py_db.set_additional_thread_info(thread)

    # print('enter thread tracer', thread, get_current_thread_id(thread))
    args = (py_db, thread, additional_info, global_cache_skips, global_cache_frame_skips)

    if f_unhandled is not None:
        if f_unhandled.f_back is None and not force_only_unhandled_tracer:
            # Happens when we attach to a running program (cannot reuse instance because it's mutable).
            top_level_thread_tracer = TopLevelThreadTracerNoBackFrame(ThreadTracer(args), args)
            additional_info.top_level_thread_tracer_no_back_frames.append(
                top_level_thread_tracer
            )  # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
        else:
            top_level_thread_tracer = additional_info.top_level_thread_tracer_unhandled
            if top_level_thread_tracer is None:
                # Stop in some internal place to report about unhandled exceptions
                top_level_thread_tracer = TopLevelThreadTracerOnlyUnhandledExceptions(args)
                additional_info.top_level_thread_tracer_unhandled = top_level_thread_tracer  # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).

        # print(' --> found to trace unhandled', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
        f_trace = top_level_thread_tracer.get_trace_dispatch_func()
        # fmt: off
        # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
        f_trace = SafeCallWrapper(f_trace)
        # ENDIF
        # fmt: on
        f_unhandled.f_trace = f_trace

        if frame is f_unhandled:
            return f_trace, False

    thread_tracer = additional_info.thread_tracer
    if thread_tracer is None or thread_tracer._args[0] is not py_db:
        thread_tracer = ThreadTracer(args)
        additional_info.thread_tracer = thread_tracer

    # fmt: off
    # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
    return SafeCallWrapper(thread_tracer), True
    # ELSE
#     return thread_tracer, True
    # ENDIF
    # fmt: on


def trace_dispatch(py_db, frame, event, arg):
    # Entry point given to sys.settrace: resolves/creates the per-thread tracer and
    # delegates this event to it (installing it via py_db.enable_tracing when needed).
    thread_trace_func, apply_to_settrace = py_db.fix_top_level_trace_and_get_trace_func(py_db, frame)
    if thread_trace_func is None:
        return None if event == "call" else NO_FTRACE
    if apply_to_settrace:
        py_db.enable_tracing(thread_trace_func)
    return thread_trace_func(frame, event, arg)


# fmt: off
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
cdef class TopLevelThreadTracerOnlyUnhandledExceptions:
    cdef public tuple _args;
    def __init__(self, tuple args):
        self._args = args
# 
ELSE
# class TopLevelThreadTracerOnlyUnhandledExceptions(object):
#     def __init__(self, args):
#         self._args = args
# 
# ENDIF
# fmt: on

    def trace_unhandled_exceptions(self, frame, event, arg):
        # Note that we ignore the frame as this tracing method should only be put in topmost frames already.
        # print('trace_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
        if event == "exception" and arg is not None:
            py_db, t, additional_info = self._args[0:3]
            # NOTE(review): the inner 'arg is not None' re-check below is redundant
            # (already guaranteed by the enclosing condition); harmless, left as-is
            # because this file is generated.
            if arg is not None:
                if not additional_info.suspended_at_unhandled:
                    additional_info.suspended_at_unhandled = True

                    py_db.stop_on_unhandled_exception(py_db, t, additional_info, arg)

        # No need to reset frame.f_trace to keep the same trace function.
        return self.trace_unhandled_exceptions

    def get_trace_dispatch_func(self):
        return self.trace_unhandled_exceptions

# fmt: off
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
cdef class TopLevelThreadTracerNoBackFrame:
    cdef public object _frame_trace_dispatch;
    cdef public tuple _args;
    cdef public object try_except_infos;
    cdef public object _last_exc_arg;
    cdef public set _raise_lines;
    cdef public int _last_raise_line;
    def __init__(self, frame_trace_dispatch, tuple args):
        self._frame_trace_dispatch = frame_trace_dispatch
        self._args = args
        self.try_except_infos = None
        self._last_exc_arg = None
        self._raise_lines = set()
        self._last_raise_line = -1
# ELSE
# class TopLevelThreadTracerNoBackFrame(object):
#     """
#     This tracer is pretty special in that it's dealing with a frame without f_back (i.e.: top frame
#     on remote attach or QThread).
# 
#     This means that we have to carefully inspect exceptions to discover whether the exception will
#     be unhandled or not (if we're dealing with an unhandled exception we need to stop as unhandled,
#     otherwise we need to use the regular tracer -- unfortunately the debugger has little info to
#     work with in the tracing -- see: https://bugs.python.org/issue34099, so, we inspect bytecode to
#     determine if some exception will be traced or not... note that if this is not available -- such
#     as on Jython -- we consider any top-level exception to be unnhandled).
#     """
# 
#     def __init__(self, frame_trace_dispatch, args):
#         self._frame_trace_dispatch = frame_trace_dispatch
#         self._args = args
#         self.try_except_infos = None
#         self._last_exc_arg = None
#         self._raise_lines = set()
#         self._last_raise_line = -1
# 
# ENDIF
# fmt: on

    def trace_dispatch_and_unhandled_exceptions(self, frame, event, arg):
        # Delegates to the regular tracer (if any) and additionally tracks raises so
        # that, on 'return' of this topmost frame, a still-pending exception can be
        # reported as unhandled.
        # DEBUG = 'code_to_debug' in frame.f_code.co_filename
        # if DEBUG: print('trace_dispatch_and_unhandled_exceptions: %s %s %s %s %s %s' % (event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno, self._frame_trace_dispatch, frame.f_lineno))
        frame_trace_dispatch = self._frame_trace_dispatch
        if frame_trace_dispatch is not None:
            self._frame_trace_dispatch = frame_trace_dispatch(frame, event, arg)

        if event == "exception":
            self._last_exc_arg = arg
            self._raise_lines.add(frame.f_lineno)
            self._last_raise_line = frame.f_lineno

        elif event == "return" and self._last_exc_arg is not None:
            # For unhandled exceptions we actually track the return when at the topmost level.
            try:
                py_db, t, additional_info = self._args[0:3]
                if not additional_info.suspended_at_unhandled:  # Note: only check it here, don't set.
                    if is_unhandled_exception(self, py_db, frame, self._last_raise_line, self._raise_lines):
                        py_db.stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
            finally:
                # Remove reference to exception after handling it.
                self._last_exc_arg = None

        ret = self.trace_dispatch_and_unhandled_exceptions

        # Need to reset (the call to _frame_trace_dispatch may have changed it).
        # fmt: off
        # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
        frame.f_trace = SafeCallWrapper(ret)
        # 
ELSE\n# frame.f_trace = ret\n # ENDIF\n # fmt: on\n return ret\n\n def get_trace_dispatch_func(self):\n return self.trace_dispatch_and_unhandled_exceptions\n\n\n# fmt: off\n# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\ncdef class ThreadTracer:\n cdef public tuple _args;\n def __init__(self, tuple args):\n self._args = args\n# ELSE\n# class ThreadTracer(object):\n# def __init__(self, args):\n# self._args = args\n# \n# ENDIF\n# fmt: on\n\n def __call__(self, frame, event, arg):\n """This is the callback used when we enter some context in the debugger.\n\n We also decorate the thread we are in with info about the debugging.\n The attributes added are:\n pydev_state\n pydev_step_stop\n pydev_step_cmd\n pydev_notify_kill\n\n :param PyDB py_db:\n This is the global debugger (this method should actually be added as a method to it).\n """\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n cdef str filename;\n cdef str base;\n cdef int pydev_step_cmd;\n cdef object frame_cache_key;\n cdef dict cache_skips;\n cdef bint is_stepping;\n cdef tuple abs_path_canonical_path_and_base;\n cdef PyDBAdditionalThreadInfo additional_info;\n # ENDIF\n # fmt: on\n\n # DEBUG = 'code_to_debug' in frame.f_code.co_filename\n # if DEBUG: print('ENTER: trace_dispatch: %s %s %s %s' % (frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name))\n py_db, t, additional_info, cache_skips, frame_skips_cache = self._args\n if additional_info.is_tracing:\n return None if event == "call" else NO_FTRACE # we don't wan't to trace code invoked from pydevd_frame.trace_dispatch\n\n additional_info.is_tracing += 1\n try:\n pydev_step_cmd = additional_info.pydev_step_cmd\n is_stepping = pydev_step_cmd != -1\n if py_db.pydb_disposed:\n return None if event == "call" else NO_FTRACE\n\n # if thread is not alive, cancel trace_dispatch processing\n if not is_thread_alive(t):\n py_db.notify_thread_not_alive(get_current_thread_id(t))\n return 
None if event == "call" else NO_FTRACE\n\n # Note: it's important that the context name is also given because we may hit something once\n # in the global context and another in the local context.\n frame_cache_key = frame.f_code\n if frame_cache_key in cache_skips:\n if not is_stepping:\n # if DEBUG: print('skipped: trace_dispatch (cache hit)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)\n return None if event == "call" else NO_FTRACE\n else:\n # When stepping we can't take into account caching based on the breakpoints (only global filtering).\n if cache_skips.get(frame_cache_key) == 1:\n if (\n additional_info.pydev_original_step_cmd in (107, 144)\n and not _global_notify_skipped_step_in\n ):\n notify_skipped_step_in_because_of_filters(py_db, frame)\n\n back_frame = frame.f_back\n if back_frame is not None and pydev_step_cmd in (\n 107,\n 144,\n 109,\n 160,\n ):\n back_frame_cache_key = back_frame.f_code\n if cache_skips.get(back_frame_cache_key) == 1:\n # if DEBUG: print('skipped: trace_dispatch (cache hit: 1)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)\n return None if event == "call" else NO_FTRACE\n else:\n # if DEBUG: print('skipped: trace_dispatch (cache hit: 2)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)\n return None if event == "call" else NO_FTRACE\n\n try:\n # Make fast path faster!\n abs_path_canonical_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]\n except:\n abs_path_canonical_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)\n\n file_type = py_db.get_file_type(\n frame, abs_path_canonical_path_and_base\n ) # we don't want to debug threading or anything related to pydevd\n\n if file_type is not None:\n if file_type == 1: # inlining LIB_FILE = 1\n if not py_db.in_project_scope(frame, abs_path_canonical_path_and_base[0]):\n # if DEBUG: print('skipped: trace_dispatch (not in scope)', abs_path_canonical_path_and_base[2], frame.f_lineno, event, 
frame.f_code.co_name, file_type)\n cache_skips[frame_cache_key] = 1\n return None if event == "call" else NO_FTRACE\n else:\n # if DEBUG: print('skipped: trace_dispatch', abs_path_canonical_path_and_base[2], frame.f_lineno, event, frame.f_code.co_name, file_type)\n cache_skips[frame_cache_key] = 1\n return None if event == "call" else NO_FTRACE\n\n if py_db.is_files_filter_enabled:\n if py_db.apply_files_filter(frame, abs_path_canonical_path_and_base[0], False):\n cache_skips[frame_cache_key] = 1\n\n if (\n is_stepping\n and additional_info.pydev_original_step_cmd in (107, 144)\n and not _global_notify_skipped_step_in\n ):\n notify_skipped_step_in_because_of_filters(py_db, frame)\n\n # A little gotcha, sometimes when we're stepping in we have to stop in a\n # return event showing the back frame as the current frame, so, we need\n # to check not only the current frame but the back frame too.\n back_frame = frame.f_back\n if back_frame is not None and pydev_step_cmd in (\n 107,\n 144,\n 109,\n 160,\n ):\n if py_db.apply_files_filter(back_frame, back_frame.f_code.co_filename, False):\n back_frame_cache_key = back_frame.f_code\n cache_skips[back_frame_cache_key] = 1\n # if DEBUG: print('skipped: trace_dispatch (filtered out: 1)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)\n return None if event == "call" else NO_FTRACE\n else:\n # if DEBUG: print('skipped: trace_dispatch (filtered out: 2)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)\n return None if event == "call" else NO_FTRACE\n\n # if DEBUG: print('trace_dispatch', filename, frame.f_lineno, event, frame.f_code.co_name, file_type)\n\n # Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak\n # reference to the frame).\n ret = PyDBFrame(\n (\n py_db,\n abs_path_canonical_path_and_base,\n additional_info,\n t,\n frame_skips_cache,\n frame_cache_key,\n )\n ).trace_dispatch(frame, event, arg)\n if ret is None:\n # 1 means skipped 
because of filters.\n # 2 means skipped because no breakpoints were hit.\n cache_skips[frame_cache_key] = 2\n return None if event == "call" else NO_FTRACE\n\n # fmt: off\n # IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)\n frame.f_trace = SafeCallWrapper(ret) # Make sure we keep the returned tracer.\n # ELSE\n# frame.f_trace = ret # Make sure we keep the returned tracer.\n # ENDIF\n # fmt: on\n return ret\n\n except SystemExit:\n return None if event == "call" else NO_FTRACE\n\n except Exception:\n if py_db.pydb_disposed:\n return None if event == "call" else NO_FTRACE # Don't log errors when we're shutting down.\n # Log it\n try:\n if pydev_log_exception is not None:\n # This can actually happen during the interpreter shutdown in Python 2.7\n pydev_log_exception()\n except:\n # Error logging? We're really in the interpreter shutdown...\n # (https://github.com/fabioz/PyDev.Debugger/issues/8)\n pass\n return None if event == "call" else NO_FTRACE\n finally:\n additional_info.is_tracing -= 1\n\n\nif USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:\n # This is far from ideal, as we'll leak frames (we'll always have the last created frame, not really\n # the last topmost frame saved -- this should be Ok for our usage, but it may leak frames and things\n # may live longer... 
as IronPython is garbage-collected, things should live longer anyways, so, it\n # shouldn't be an issue as big as it's in CPython -- it may still be annoying, but this should\n # be a reasonable workaround until IronPython itself is able to provide that functionality).\n #\n # See: https://github.com/IronLanguages/main/issues/1630\n from _pydevd_bundle.pydevd_constants import constructed_tid_to_last_frame\n\n _original_call = ThreadTracer.__call__\n\n def __call__(self, frame, event, arg):\n constructed_tid_to_last_frame[self._args[1].ident] = frame\n return _original_call(self, frame, event, arg)\n\n ThreadTracer.__call__ = __call__\n\nif PYDEVD_USE_SYS_MONITORING:\n\n def fix_top_level_trace_and_get_trace_func(*args, **kwargs):\n raise RuntimeError("Not used in sys.monitoring mode.")\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_cython.pyx
|
pydevd_cython.pyx
|
Other
| 101,046 | 0.75 | 0.216751 | 0.313465 |
react-lib
| 970 |
2025-06-12T13:08:44.821942
|
BSD-3-Clause
| false |
2bbb2271fde3551b55401b65ebf9114f
|
import sys\n\ntry:\n try:\n from _pydevd_bundle_ext import pydevd_cython as mod\n\n except ImportError:\n from _pydevd_bundle import pydevd_cython as mod\n\nexcept ImportError:\n import struct\n\n try:\n is_python_64bit = struct.calcsize("P") == 8\n except:\n # In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.\n raise ImportError\n plat = "32"\n if is_python_64bit:\n plat = "64"\n\n # We also accept things as:\n #\n # _pydevd_bundle.pydevd_cython_win32_27_32\n # _pydevd_bundle.pydevd_cython_win32_34_64\n #\n # to have multiple pre-compiled pyds distributed along the IDE\n # (generated by build_tools/build_binaries_windows.py).\n\n mod_name = "pydevd_cython_%s_%s%s_%s" % (sys.platform, sys.version_info[0], sys.version_info[1], plat)\n check_name = "_pydevd_bundle.%s" % (mod_name,)\n mod = getattr(__import__(check_name), mod_name)\n\n# Regardless of how it was found, make sure it's later available as the\n# initial name so that the expected types from cython in frame eval\n# are valid.\nsys.modules["_pydevd_bundle.pydevd_cython"] = mod\n\ntrace_dispatch = mod.trace_dispatch\n\nPyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo\n\nset_additional_thread_info = mod.set_additional_thread_info\n\nany_thread_stepping = mod.any_thread_stepping\n\nremove_additional_info = mod.remove_additional_info\n\nglobal_cache_skips = mod.global_cache_skips\n\nglobal_cache_frame_skips = mod.global_cache_frame_skips\n\n_set_additional_thread_info_lock = mod._set_additional_thread_info_lock\n\nfix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func\n\nhandle_exception = mod.handle_exception\n\nshould_stop_on_exception = mod.should_stop_on_exception\n\nis_unhandled_exception = mod.is_unhandled_exception\n\nversion = getattr(mod, "version", 0)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_cython_wrapper.py
|
pydevd_cython_wrapper.py
|
Python
| 1,913 | 0.95 | 0.079365 | 0.255814 |
python-kit
| 552 |
2024-10-31T10:15:36.460575
|
Apache-2.0
| false |
75f0b3c9c9c753841c48aee9ea885ebd
|
from _pydev_bundle._pydev_saved_modules import threading\nfrom _pydev_bundle import _pydev_saved_modules\nfrom _pydevd_bundle.pydevd_utils import notify_about_gevent_if_needed\nimport weakref\nfrom _pydevd_bundle.pydevd_constants import (\n IS_JYTHON,\n IS_IRONPYTHON,\n PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS,\n PYDEVD_USE_SYS_MONITORING,\n)\nfrom _pydev_bundle.pydev_log import exception as pydev_log_exception\nimport sys\nfrom _pydev_bundle import pydev_log\nimport pydevd_tracing\nfrom _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions\nfrom _pydevd_sys_monitoring import pydevd_sys_monitoring\n\nif IS_JYTHON:\n import org.python.core as JyCore # @UnresolvedImport\n\n\nclass PyDBDaemonThread(threading.Thread):\n def __init__(self, py_db, target_and_args=None):\n """\n :param target_and_args:\n tuple(func, args, kwargs) if this should be a function and args to run.\n -- Note: use through run_as_pydevd_daemon_thread().\n """\n threading.Thread.__init__(self)\n notify_about_gevent_if_needed()\n self._py_db = weakref.ref(py_db)\n self._kill_received = False\n mark_as_pydevd_daemon_thread(self)\n self._target_and_args = target_and_args\n\n @property\n def py_db(self):\n return self._py_db()\n\n def run(self):\n created_pydb_daemon = self.py_db.created_pydb_daemon_threads\n created_pydb_daemon[self] = 1\n try:\n try:\n if IS_JYTHON and not isinstance(threading.current_thread(), threading._MainThread):\n # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'\n # module, and the new instance of main thread is created\n ss = JyCore.PySystemState()\n # Note: Py.setSystemState() affects only the current thread.\n JyCore.Py.setSystemState(ss)\n\n self._stop_trace()\n self._on_run()\n except:\n if sys is not None and pydev_log_exception is not None:\n pydev_log_exception()\n finally:\n del created_pydb_daemon[self]\n\n def _on_run(self):\n if self._target_and_args is not None:\n target, args, kwargs = 
self._target_and_args\n target(*args, **kwargs)\n else:\n raise NotImplementedError("Should be reimplemented by: %s" % self.__class__)\n\n def do_kill_pydev_thread(self):\n if not self._kill_received:\n pydev_log.debug("%s received kill signal", self.name)\n self._kill_received = True\n\n def _stop_trace(self):\n if self.pydev_do_not_trace:\n if PYDEVD_USE_SYS_MONITORING:\n pydevd_sys_monitoring.stop_monitoring(all_threads=False)\n return\n pydevd_tracing.SetTrace(None) # no debugging on this thread\n\n\ndef _collect_load_names(func):\n found_load_names = set()\n for instruction in iter_instructions(func.__code__):\n if instruction.opname in ("LOAD_GLOBAL", "LOAD_ATTR", "LOAD_METHOD"):\n found_load_names.add(instruction.argrepr)\n return found_load_names\n\n\ndef _patch_threading_to_hide_pydevd_threads():\n """\n Patches the needed functions on the `threading` module so that the pydevd threads are hidden.\n\n Note that we patch the functions __code__ to avoid issues if some code had already imported those\n variables prior to the patching.\n """\n found_load_names = _collect_load_names(threading.enumerate)\n # i.e.: we'll only apply the patching if the function seems to be what we expect.\n\n new_threading_enumerate = None\n\n if found_load_names in (\n {"_active_limbo_lock", "_limbo", "_active", "values", "list"},\n {"_active_limbo_lock", "_limbo", "_active", "values", "NULL + list"},\n {"NULL + list", "_active", "_active_limbo_lock", "NULL|self + values", "_limbo"},\n {"_active_limbo_lock", "values + NULL|self", "_limbo", "_active", "list + NULL"},\n ):\n pydev_log.debug("Applying patching to hide pydevd threads (Py3 version).")\n\n def new_threading_enumerate():\n with _active_limbo_lock:\n ret = list(_active.values()) + list(_limbo.values())\n\n return [t for t in ret if not getattr(t, "is_pydev_daemon_thread", False)]\n\n elif found_load_names == set(("_active_limbo_lock", "_limbo", "_active", "values")):\n pydev_log.debug("Applying patching to hide pydevd 
threads (Py2 version).")\n\n def new_threading_enumerate():\n with _active_limbo_lock:\n ret = _active.values() + _limbo.values()\n\n return [t for t in ret if not getattr(t, "is_pydev_daemon_thread", False)]\n\n else:\n pydev_log.info("Unable to hide pydevd threads. Found names in threading.enumerate: %s", found_load_names)\n\n if new_threading_enumerate is not None:\n\n def pydevd_saved_threading_enumerate():\n with threading._active_limbo_lock:\n return list(threading._active.values()) + list(threading._limbo.values())\n\n _pydev_saved_modules.pydevd_saved_threading_enumerate = pydevd_saved_threading_enumerate\n\n threading.enumerate.__code__ = new_threading_enumerate.__code__\n\n # We also need to patch the active count (to match what we have in the enumerate).\n def new_active_count():\n # Note: as this will be executed in the `threading` module, `enumerate` will\n # actually be threading.enumerate.\n return len(enumerate())\n\n threading.active_count.__code__ = new_active_count.__code__\n\n # When shutting down, Python (on some versions) may do something as:\n #\n # def _pickSomeNonDaemonThread():\n # for t in enumerate():\n # if not t.daemon and t.is_alive():\n # return t\n # return None\n #\n # But in this particular case, we do want threads with `is_pydev_daemon_thread` to appear\n # explicitly due to the pydevd `CheckAliveThread` (because we want the shutdown to wait on it).\n # So, it can't rely on the `enumerate` for that anymore as it's patched to not return pydevd threads.\n if hasattr(threading, "_pickSomeNonDaemonThread"):\n\n def new_pick_some_non_daemon_thread():\n with _active_limbo_lock:\n # Ok for py2 and py3.\n threads = list(_active.values()) + list(_limbo.values())\n\n for t in threads:\n if not t.daemon and t.is_alive():\n return t\n return None\n\n threading._pickSomeNonDaemonThread.__code__ = new_pick_some_non_daemon_thread.__code__\n\n\n_patched_threading_to_hide_pydevd_threads = False\n\n\ndef mark_as_pydevd_daemon_thread(thread):\n if 
not IS_JYTHON and not IS_IRONPYTHON and PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS:\n global _patched_threading_to_hide_pydevd_threads\n if not _patched_threading_to_hide_pydevd_threads:\n # When we mark the first thread as a pydevd daemon thread, we also change the threading\n # functions to hide pydevd threads.\n # Note: we don't just "hide" the pydevd threads from the threading module by not using it\n # (i.e.: just using the `thread.start_new_thread` instead of `threading.Thread`)\n # because there's 1 thread (the `CheckAliveThread`) which is a pydevd thread but\n # isn't really a daemon thread (so, we need CPython to wait on it for shutdown,\n # in which case it needs to be in `threading` and the patching would be needed anyways).\n _patched_threading_to_hide_pydevd_threads = True\n try:\n _patch_threading_to_hide_pydevd_threads()\n except:\n pydev_log.exception("Error applying patching to hide pydevd threads.")\n\n thread.pydev_do_not_trace = True\n thread.is_pydev_daemon_thread = True\n thread.daemon = True\n\n\ndef run_as_pydevd_daemon_thread(py_db, func, *args, **kwargs):\n """\n Runs a function as a pydevd daemon thread (without any tracing in place).\n """\n t = PyDBDaemonThread(py_db, target_and_args=(func, args, kwargs))\n t.name = "%s (pydevd daemon thread)" % (func.__name__,)\n t.start()\n return t\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_daemon_thread.py
|
pydevd_daemon_thread.py
|
Python
| 8,579 | 0.95 | 0.257426 | 0.159509 |
python-kit
| 222 |
2023-11-21T17:21:48.836853
|
MIT
| false |
e69e5b4d47bf2ac15ee2e6436d74b2aa
|
"""\nThis module holds the customization settings for the debugger.\n"""\n\nfrom _pydevd_bundle.pydevd_constants import QUOTED_LINE_PROTOCOL\nfrom _pydev_bundle import pydev_log\nimport sys\n\n\nclass PydevdCustomization(object):\n DEFAULT_PROTOCOL: str = QUOTED_LINE_PROTOCOL\n\n # Debug mode may be set to 'debugpy-dap'.\n #\n # In 'debugpy-dap' mode the following settings are done to PyDB:\n #\n # py_db.skip_suspend_on_breakpoint_exception = (BaseException,)\n # py_db.skip_print_breakpoint_exception = (NameError,)\n # py_db.multi_threads_single_notification = True\n DEBUG_MODE: str = ""\n\n # This may be a <sys_path_entry>;<module_name> to be pre-imported\n # Something as: 'c:/temp/foo;my_module.bar'\n #\n # What's done in this case is something as:\n #\n # sys.path.insert(0, <sys_path_entry>)\n # try:\n # import <module_name>\n # finally:\n # del sys.path[0]\n #\n # If the pre-import fails an output message is\n # sent (but apart from that debugger execution\n # should continue).\n PREIMPORT: str = ""\n\n\ndef on_pydb_init(py_db):\n if PydevdCustomization.DEBUG_MODE == "debugpy-dap":\n pydev_log.debug("Apply debug mode: debugpy-dap")\n py_db.skip_suspend_on_breakpoint_exception = (BaseException,)\n py_db.skip_print_breakpoint_exception = (NameError,)\n py_db.multi_threads_single_notification = True\n elif not PydevdCustomization.DEBUG_MODE:\n pydev_log.debug("Apply debug mode: default")\n else:\n pydev_log.debug("WARNING: unknown debug mode: %s", PydevdCustomization.DEBUG_MODE)\n\n if PydevdCustomization.PREIMPORT:\n pydev_log.debug("Preimport: %s", PydevdCustomization.PREIMPORT)\n try:\n sys_path_entry, module_name = PydevdCustomization.PREIMPORT.rsplit(";", maxsplit=1)\n except Exception:\n pydev_log.exception("Expected ';' in %s" % (PydevdCustomization.PREIMPORT,))\n else:\n try:\n sys.path.insert(0, sys_path_entry)\n try:\n __import__(module_name)\n finally:\n sys.path.remove(sys_path_entry)\n except Exception:\n pydev_log.exception("Error importing %s (with 
sys.path entry: %s)" % (module_name, sys_path_entry))\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_defaults.py
|
pydevd_defaults.py
|
Python
| 2,358 | 0.95 | 0.140625 | 0.375 |
react-lib
| 521 |
2024-10-27T04:53:45.149894
|
MIT
| false |
d2c3b8cbf3c6ac0517f7317ea70dc666
|
"""\nSupport for a tag that allows skipping over functions while debugging.\n"""\nimport linecache\nimport re\n\n# To suppress tracing a method, add the tag @DontTrace\n# to a comment either preceding or on the same line as\n# the method definition\n#\n# E.g.:\n# #@DontTrace\n# def test1():\n# pass\n#\n# ... or ...\n#\n# def test2(): #@DontTrace\n# pass\nDONT_TRACE_TAG = "@DontTrace"\n\n# Regular expression to match a decorator (at the beginning\n# of a line).\nRE_DECORATOR = re.compile(r"^\s*@")\n\n# Mapping from code object to bool.\n# If the key exists, the value is the cached result of should_trace_hook\n_filename_to_ignored_lines = {}\n\n\ndef default_should_trace_hook(code, absolute_filename):\n """\n Return True if this frame should be traced, False if tracing should be blocked.\n """\n # First, check whether this code object has a cached value\n ignored_lines = _filename_to_ignored_lines.get(absolute_filename)\n if ignored_lines is None:\n # Now, look up that line of code and check for a @DontTrace\n # preceding or on the same line as the method.\n # E.g.:\n # #@DontTrace\n # def test():\n # pass\n # ... 
or ...\n # def test(): #@DontTrace\n # pass\n ignored_lines = {}\n lines = linecache.getlines(absolute_filename)\n for i_line, line in enumerate(lines):\n j = line.find("#")\n if j >= 0:\n comment = line[j:]\n if DONT_TRACE_TAG in comment:\n ignored_lines[i_line] = 1\n\n # Note: when it's found in the comment, mark it up and down for the decorator lines found.\n k = i_line - 1\n while k >= 0:\n if RE_DECORATOR.match(lines[k]):\n ignored_lines[k] = 1\n k -= 1\n else:\n break\n\n k = i_line + 1\n while k <= len(lines):\n if RE_DECORATOR.match(lines[k]):\n ignored_lines[k] = 1\n k += 1\n else:\n break\n\n _filename_to_ignored_lines[absolute_filename] = ignored_lines\n\n func_line = code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed\n return not (\n func_line - 1 in ignored_lines # -1 to get line before method\n or func_line in ignored_lines\n ) # method line\n\n\nshould_trace_hook = None\n\n\ndef clear_trace_filter_cache():\n """\n Clear the trace filter cache.\n Call this after reloading.\n """\n global should_trace_hook\n try:\n # Need to temporarily disable a hook because otherwise\n # _filename_to_ignored_lines.clear() will never complete.\n old_hook = should_trace_hook\n should_trace_hook = None\n\n # Clear the linecache\n linecache.clearcache()\n _filename_to_ignored_lines.clear()\n\n finally:\n should_trace_hook = old_hook\n\n\ndef trace_filter(mode):\n """\n Set the trace filter mode.\n\n mode: Whether to enable the trace hook.\n True: Trace filtering on (skipping methods tagged @DontTrace)\n False: Trace filtering off (trace methods tagged @DontTrace)\n None/default: Toggle trace filtering.\n """\n global should_trace_hook\n if mode is None:\n mode = should_trace_hook is None\n\n if mode:\n should_trace_hook = default_should_trace_hook\n else:\n should_trace_hook = None\n\n return mode\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_dont_trace.py
|
pydevd_dont_trace.py
|
Python
| 3,685 | 0.95 | 0.195122 | 0.300971 |
python-kit
| 408 |
2024-09-26T20:50:03.859215
|
GPL-3.0
| false |
ec7cb4c549828177e9e9b48ca021e6ad
|
# Important: Autogenerated file.\n\n# fmt: off\n# DO NOT edit manually!\n# DO NOT edit manually!\n\nLIB_FILE = 1\nPYDEV_FILE = 2\n\nDONT_TRACE_DIRS = {\n '_pydev_bundle': PYDEV_FILE,\n '_pydev_runfiles': PYDEV_FILE,\n '_pydevd_bundle': PYDEV_FILE,\n '_pydevd_frame_eval': PYDEV_FILE,\n '_pydevd_sys_monitoring': PYDEV_FILE,\n 'pydev_ipython': LIB_FILE,\n 'pydev_sitecustomize': PYDEV_FILE,\n 'pydevd_attach_to_process': PYDEV_FILE,\n 'pydevd_concurrency_analyser': PYDEV_FILE,\n 'pydevd_plugins': PYDEV_FILE,\n 'test_pydevd_reload': PYDEV_FILE,\n}\n\nLIB_FILES_IN_DONT_TRACE_DIRS = {\n '__init__.py',\n 'inputhook.py',\n 'inputhookglut.py',\n 'inputhookgtk.py',\n 'inputhookgtk3.py',\n 'inputhookpyglet.py',\n 'inputhookqt4.py',\n 'inputhookqt5.py',\n 'inputhookqt6.py',\n 'inputhooktk.py',\n 'inputhookwx.py',\n 'matplotlibtools.py',\n 'qt.py',\n 'qt_for_kernel.py',\n 'qt_loaders.py',\n 'version.py',\n}\n\nDONT_TRACE = {\n # commonly used things from the stdlib that we don't want to trace\n 'Queue.py':LIB_FILE,\n 'queue.py':LIB_FILE,\n 'socket.py':LIB_FILE,\n 'weakref.py':LIB_FILE,\n '_weakrefset.py':LIB_FILE,\n 'linecache.py':LIB_FILE,\n 'threading.py':LIB_FILE,\n 'dis.py':LIB_FILE,\n\n # things from pydev that we don't want to trace\n '__main__pydevd_gen_debug_adapter_protocol.py': PYDEV_FILE,\n '_pydev_calltip_util.py': PYDEV_FILE,\n '_pydev_completer.py': PYDEV_FILE,\n '_pydev_execfile.py': PYDEV_FILE,\n '_pydev_filesystem_encoding.py': PYDEV_FILE,\n '_pydev_getopt.py': PYDEV_FILE,\n '_pydev_imports_tipper.py': PYDEV_FILE,\n '_pydev_jy_imports_tipper.py': PYDEV_FILE,\n '_pydev_log.py': PYDEV_FILE,\n '_pydev_saved_modules.py': PYDEV_FILE,\n '_pydev_sys_patch.py': PYDEV_FILE,\n '_pydev_tipper_common.py': PYDEV_FILE,\n '_pydevd_sys_monitoring.py': PYDEV_FILE,\n 'django_debug.py': PYDEV_FILE,\n 'jinja2_debug.py': PYDEV_FILE,\n 'pycompletionserver.py': PYDEV_FILE,\n 'pydev_app_engine_debug_startup.py': PYDEV_FILE,\n 'pydev_console_utils.py': PYDEV_FILE,\n 
'pydev_import_hook.py': PYDEV_FILE,\n 'pydev_imports.py': PYDEV_FILE,\n 'pydev_ipython_console.py': PYDEV_FILE,\n 'pydev_ipython_console_011.py': PYDEV_FILE,\n 'pydev_is_thread_alive.py': PYDEV_FILE,\n 'pydev_localhost.py': PYDEV_FILE,\n 'pydev_log.py': PYDEV_FILE,\n 'pydev_monkey.py': PYDEV_FILE,\n 'pydev_monkey_qt.py': PYDEV_FILE,\n 'pydev_override.py': PYDEV_FILE,\n 'pydev_run_in_console.py': PYDEV_FILE,\n 'pydev_runfiles.py': PYDEV_FILE,\n 'pydev_runfiles_coverage.py': PYDEV_FILE,\n 'pydev_runfiles_nose.py': PYDEV_FILE,\n 'pydev_runfiles_parallel.py': PYDEV_FILE,\n 'pydev_runfiles_parallel_client.py': PYDEV_FILE,\n 'pydev_runfiles_pytest2.py': PYDEV_FILE,\n 'pydev_runfiles_unittest.py': PYDEV_FILE,\n 'pydev_runfiles_xml_rpc.py': PYDEV_FILE,\n 'pydev_umd.py': PYDEV_FILE,\n 'pydev_versioncheck.py': PYDEV_FILE,\n 'pydevconsole.py': PYDEV_FILE,\n 'pydevconsole_code.py': PYDEV_FILE,\n 'pydevd.py': PYDEV_FILE,\n 'pydevd_additional_thread_info.py': PYDEV_FILE,\n 'pydevd_additional_thread_info_regular.py': PYDEV_FILE,\n 'pydevd_api.py': PYDEV_FILE,\n 'pydevd_base_schema.py': PYDEV_FILE,\n 'pydevd_breakpoints.py': PYDEV_FILE,\n 'pydevd_bytecode_utils.py': PYDEV_FILE,\n 'pydevd_bytecode_utils_py311.py': PYDEV_FILE,\n 'pydevd_code_to_source.py': PYDEV_FILE,\n 'pydevd_collect_bytecode_info.py': PYDEV_FILE,\n 'pydevd_comm.py': PYDEV_FILE,\n 'pydevd_comm_constants.py': PYDEV_FILE,\n 'pydevd_command_line_handling.py': PYDEV_FILE,\n 'pydevd_concurrency_logger.py': PYDEV_FILE,\n 'pydevd_console.py': PYDEV_FILE,\n 'pydevd_constants.py': PYDEV_FILE,\n 'pydevd_custom_frames.py': PYDEV_FILE,\n 'pydevd_cython_wrapper.py': PYDEV_FILE,\n 'pydevd_daemon_thread.py': PYDEV_FILE,\n 'pydevd_defaults.py': PYDEV_FILE,\n 'pydevd_dont_trace.py': PYDEV_FILE,\n 'pydevd_dont_trace_files.py': PYDEV_FILE,\n 'pydevd_exec2.py': PYDEV_FILE,\n 'pydevd_extension_api.py': PYDEV_FILE,\n 'pydevd_extension_utils.py': PYDEV_FILE,\n 'pydevd_file_utils.py': PYDEV_FILE,\n 'pydevd_filtering.py': PYDEV_FILE,\n 
'pydevd_frame.py': PYDEV_FILE,\n 'pydevd_frame_eval_cython_wrapper.py': PYDEV_FILE,\n 'pydevd_frame_eval_main.py': PYDEV_FILE,\n 'pydevd_frame_tracing.py': PYDEV_FILE,\n 'pydevd_frame_utils.py': PYDEV_FILE,\n 'pydevd_gevent_integration.py': PYDEV_FILE,\n 'pydevd_helpers.py': PYDEV_FILE,\n 'pydevd_import_class.py': PYDEV_FILE,\n 'pydevd_io.py': PYDEV_FILE,\n 'pydevd_json_debug_options.py': PYDEV_FILE,\n 'pydevd_line_validation.py': PYDEV_FILE,\n 'pydevd_modify_bytecode.py': PYDEV_FILE,\n 'pydevd_net_command.py': PYDEV_FILE,\n 'pydevd_net_command_factory_json.py': PYDEV_FILE,\n 'pydevd_net_command_factory_xml.py': PYDEV_FILE,\n 'pydevd_plugin_numpy_types.py': PYDEV_FILE,\n 'pydevd_plugin_pandas_types.py': PYDEV_FILE,\n 'pydevd_plugin_utils.py': PYDEV_FILE,\n 'pydevd_plugins_django_form_str.py': PYDEV_FILE,\n 'pydevd_process_net_command.py': PYDEV_FILE,\n 'pydevd_process_net_command_json.py': PYDEV_FILE,\n 'pydevd_referrers.py': PYDEV_FILE,\n 'pydevd_reload.py': PYDEV_FILE,\n 'pydevd_resolver.py': PYDEV_FILE,\n 'pydevd_runpy.py': PYDEV_FILE,\n 'pydevd_safe_repr.py': PYDEV_FILE,\n 'pydevd_save_locals.py': PYDEV_FILE,\n 'pydevd_schema.py': PYDEV_FILE,\n 'pydevd_schema_log.py': PYDEV_FILE,\n 'pydevd_signature.py': PYDEV_FILE,\n 'pydevd_source_mapping.py': PYDEV_FILE,\n 'pydevd_stackless.py': PYDEV_FILE,\n 'pydevd_suspended_frames.py': PYDEV_FILE,\n 'pydevd_sys_monitoring.py': PYDEV_FILE,\n 'pydevd_thread_lifecycle.py': PYDEV_FILE,\n 'pydevd_thread_wrappers.py': PYDEV_FILE,\n 'pydevd_timeout.py': PYDEV_FILE,\n 'pydevd_trace_dispatch.py': PYDEV_FILE,\n 'pydevd_trace_dispatch_regular.py': PYDEV_FILE,\n 'pydevd_traceproperty.py': PYDEV_FILE,\n 'pydevd_tracing.py': PYDEV_FILE,\n 'pydevd_utils.py': PYDEV_FILE,\n 'pydevd_vars.py': PYDEV_FILE,\n 'pydevd_vm_type.py': PYDEV_FILE,\n 'pydevd_xml.py': PYDEV_FILE,\n}\n\n# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)\nDONT_TRACE['io.py'] = LIB_FILE\n\n# Don't trace common encodings 
too\nDONT_TRACE['cp1252.py'] = LIB_FILE\nDONT_TRACE['utf_8.py'] = LIB_FILE\nDONT_TRACE['codecs.py'] = LIB_FILE\n\n# fmt: on\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_dont_trace_files.py
|
pydevd_dont_trace_files.py
|
Python
| 6,541 | 0.95 | 0.011236 | 0.053254 |
vue-tools
| 762 |
2024-08-10T15:21:22.033625
|
BSD-3-Clause
| false |
79a3b9c52a9759791ca885d5165ea8af
|
import abc
from typing import Any


# Adapted from six: lets a class body declare its metaclass in a way that
# worked on both Python 2 and 3.
def _with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""

    class _bootstrap(meta):
        def __new__(cls, name, this_bases, d):
            # When the first real subclass is defined, discard the temporary
            # base and let the actual metaclass build it with *bases*.
            return meta(name, bases, d)

    return type.__new__(_bootstrap, "temporary_class", (), {})


# =======================================================================================================================
# AbstractResolver
# =======================================================================================================================
class _AbstractResolver(_with_metaclass(abc.ABCMeta)):
    """
    Documentation-only base describing how to implement a resolver.

    Typical strategies:
    - list: get_dictionary returns index->item; resolve converts the index back
    - set: get_dictionary returns id(object)->object; resolve re-scans for that id
    - arbitrary instance: get_dictionary returns attr_name->attr; resolve uses getattr
    """

    @abc.abstractmethod
    def resolve(self, var, attribute):
        """
        Resolve one child of *var*.

        :param var: the actual variable whose child is requested.
        :param attribute: the string key previously returned by get_dictionary.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_dictionary(self, var):
        """
        :param var: the variable whose children should be computed.

        :return: a dict whose key/value pairs are presented as the children of
            *var* in the variables view.
        """
        raise NotImplementedError


class _AbstractProvider(_with_metaclass(abc.ABCMeta)):
    # Gatekeeper: an extension only handles types for which this returns True.
    @abc.abstractmethod
    def can_provide(self, type_object, type_name):
        raise NotImplementedError


# =======================================================================================================================
# API CLASSES:
# =======================================================================================================================


class TypeResolveProvider(_AbstractResolver, _AbstractProvider):
    """
    Extension point: subclass to supply a custom resolver (see _AbstractResolver).
    """


class StrPresentationProvider(_AbstractProvider):
    """
    Extension point: subclass to supply a custom str presentation for a type.
    """

    def get_str_in_context(self, val: Any, context: str):
        """
        :param val:
            The object for which a string representation is wanted.

        :param context:
            Where the value will be shown. Valid values:
            "watch",
            "repl",
            "hover",
            "clipboard"

        :note: optional override -- the default simply delegates to get_str,
            so only override it when the representation depends on the context.
        """
        return self.get_str(val)

    @abc.abstractmethod
    def get_str(self, val):
        raise NotImplementedError


class DebuggerEventHandler(_with_metaclass(abc.ABCMeta)):
    """
    Extension point: subclass to receive lifecycle events from the debugger.
    """

    def on_debugger_modules_loaded(self, **kwargs):
        """
        Invoked after all debugger modules are loaded -- a safe moment to
        import and/or patch debugger modules.

        :param kwargs: flexible dict passed from the debugger; currently
            carries the debugger version.
        """
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_extension_api.py
|
pydevd_extension_api.py
|
Python
| 4,013 | 0.95 | 0.214953 | 0.08642 |
node-utils
| 112 |
2025-02-12T22:52:23.631524
|
Apache-2.0
| false |
655013abf53f3dc117d8abce6ef4fb39
|
import pkgutil
import sys
from _pydev_bundle import pydev_log

try:
    import pydevd_plugins.extensions as extensions
except:
    pydev_log.exception()
    extensions = None


class ExtensionManager(object):
    """Finds 'pydevd_plugin*' modules under pydevd_plugins.extensions and
    instantiates the extension classes they expose, cached per extension type."""

    def __init__(self):
        # None means "not scanned yet"; becomes a list after _load_modules.
        self.loaded_extensions = None
        # extension_type -> list of instantiated handlers (result cache).
        self.type_to_instance = {}

    def _load_modules(self):
        """Import every non-package module named 'pydevd_plugin*' under the
        extensions namespace, collecting the successfully imported ones."""
        self.loaded_extensions = []
        if not extensions:
            return
        prefix = extensions.__name__ + "."
        for _loader, name, ispkg in pkgutil.walk_packages(extensions.__path__, prefix):
            if ispkg:
                continue
            if not name.split(".")[-1].startswith("pydevd_plugin"):
                continue
            try:
                __import__(name)
                self.loaded_extensions.append(sys.modules[name])
            except ImportError:
                pydev_log.critical("Unable to load extension: %s", name)

    def _ensure_loaded(self):
        # Lazy: scan the extensions namespace only on first use.
        if self.loaded_extensions is None:
            self._load_modules()

    def _iter_attr(self):
        """Yield (name, value) for each public attribute of every loaded
        extension module, honoring __all__ when the module defines it."""
        for module in self.loaded_extensions:
            exported = getattr(module, "__all__", None)
            for candidate in dir(module):
                if candidate.startswith("_"):
                    continue
                if exported is not None and candidate not in exported:
                    continue
                yield candidate, getattr(module, candidate)

    def get_extension_classes(self, extension_type):
        """Return (and cache) one instance of every loaded class that is a
        strict subclass of *extension_type*."""
        self._ensure_loaded()
        try:
            return self.type_to_instance[extension_type]
        except KeyError:
            pass
        instances = self.type_to_instance.setdefault(extension_type, [])
        for candidate_name, candidate in self._iter_attr():
            is_handler = isinstance(candidate, type) and issubclass(candidate, extension_type) and candidate is not extension_type
            if not is_handler:
                continue
            try:
                instances.append(candidate())
            except:
                pydev_log.exception("Unable to load extension class: %s", candidate_name)
        return instances


EXTENSION_MANAGER_INSTANCE = ExtensionManager()


def extensions_of_type(extension_type):
    """
    Return instances of every loaded extension implementing the given hook.

    :param T extension_type: The type of the extension hook
    :rtype: list[T]
    """
    return EXTENSION_MANAGER_INSTANCE.get_extension_classes(extension_type)
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_extension_utils.py
|
pydevd_extension_utils.py
|
Python
| 2,365 | 0.85 | 0.338462 | 0 |
node-utils
| 698 |
2023-07-14T20:43:33.563003
|
BSD-3-Clause
| false |
15257419b58b6f8cc6625233f9e14b39
|
import fnmatch
import glob
import os.path
import sys

from _pydev_bundle import pydev_log
import pydevd_file_utils
import json
from collections import namedtuple
from _pydev_bundle._pydev_saved_modules import threading
from pydevd_file_utils import normcase
from _pydevd_bundle.pydevd_constants import USER_CODE_BASENAMES_STARTING_WITH, LIBRARY_CODE_BASENAMES_STARTING_WITH, IS_PYPY, IS_WINDOWS
from _pydevd_bundle import pydevd_constants
from _pydevd_bundle.pydevd_constants import is_true_in_env

# name: a glob pattern (is_path=True) or a module name (is_path=False);
# exclude: True means files/modules matching the filter are excluded.
ExcludeFilter = namedtuple("ExcludeFilter", "name, exclude, is_path")


def _convert_to_str_and_clear_empty(roots):
    # Validates that every root is a str and drops empty entries.
    new_roots = []
    for root in roots:
        assert isinstance(root, str), "%s not str (found: %s)" % (root, type(root))
        if root:
            new_roots.append(root)
    return new_roots


def _check_matches(patterns, paths):
    # Recursively matches a list of glob pattern parts against path parts,
    # consuming both left to right. '**' may swallow any number of path
    # parts. Parts are compared case-normalized (normcase).
    if not patterns and not paths:
        # Matched to the end.
        return True

    if (not patterns and paths) or (patterns and not paths):
        # One side exhausted before the other: cannot match.
        return False

    pattern = normcase(patterns[0])
    path = normcase(paths[0])

    if not glob.has_magic(pattern):
        # Literal part: must be exactly equal.
        if pattern != path:
            return False

    elif pattern == "**":
        if len(patterns) == 1:
            return True  # if ** is the last one it matches anything to the right.

        for i in range(len(paths)):
            # Recursively check the remaining patterns as the
            # current pattern could match any number of paths.
            if _check_matches(patterns[1:], paths[i:]):
                return True

    elif not fnmatch.fnmatch(path, pattern):
        # Current part doesn't match.
        return False

    return _check_matches(patterns[1:], paths[1:])


def glob_matches_path(path, pattern, sep=os.sep, altsep=os.altsep):
    """
    Return True if *path* matches the glob *pattern* (supports '**').

    *altsep* (e.g. '/' on Windows) is normalized to *sep* first; drive
    letters, when present in both, are compared case-insensitively.
    """
    if altsep:
        pattern = pattern.replace(altsep, sep)
        path = path.replace(altsep, sep)

    drive = ""
    if len(path) > 1 and path[1] == ":":
        # Split off the drive letter so it can be compared separately.
        drive, path = path[0], path[2:]

    if drive and len(pattern) > 1:
        if pattern[1] == ":":
            if drive.lower() != pattern[0].lower():
                return False
            pattern = pattern[2:]

    patterns = pattern.split(sep)
    paths = path.split(sep)
    if paths:
        if paths[0] == "":
            # Absolute path: drop the empty part before the leading separator.
            paths = paths[1:]
    if patterns:
        if patterns[0] == "":
            patterns = patterns[1:]

    return _check_matches(patterns, paths)


class FilesFiltering(object):
    """
    Note: calls at FilesFiltering are uncached.

    The actual API used should be through PyDB.
    """

    def __init__(self):
        self._exclude_filters = []
        self._project_roots = []
        self._library_roots = []

        # Filter out libraries?
        self._use_libraries_filter = False
        self.require_module = False  # True if some exclude filter filters by the module.

        self.set_use_libraries_filter(is_true_in_env("PYDEVD_FILTER_LIBRARIES"))

        # Project roots come from the environment (os.pathsep-separated),
        # defaulting to empty (meaning: everything not in a library root).
        project_roots = os.getenv("IDE_PROJECT_ROOTS", None)
        if project_roots is not None:
            project_roots = project_roots.split(os.pathsep)
        else:
            project_roots = []
        self.set_project_roots(project_roots)

        # Library roots likewise, defaulting to stdlib/site-packages locations.
        library_roots = os.getenv("LIBRARY_ROOTS", None)
        if library_roots is not None:
            library_roots = library_roots.split(os.pathsep)
        else:
            library_roots = self._get_default_library_roots()
        self.set_library_roots(library_roots)

        # Stepping filters.
        pydevd_filters = os.getenv("PYDEVD_FILTERS", "")
        # To filter out it's something as: {'**/not_my_code/**': True}
        if pydevd_filters:
            pydev_log.debug("PYDEVD_FILTERS %s", (pydevd_filters,))
            if pydevd_filters.startswith("{"):
                # dict(glob_pattern (str) -> exclude(True or False))
                exclude_filters = []
                for key, val in json.loads(pydevd_filters).items():
                    exclude_filters.append(ExcludeFilter(key, val, True))
                self._exclude_filters = exclude_filters
            else:
                # A ';' separated list of strings with globs for the
                # list of excludes.
                filters = pydevd_filters.split(";")
                new_filters = []
                for new_filter in filters:
                    if new_filter.strip():
                        new_filters.append(ExcludeFilter(new_filter.strip(), True, True))
                self._exclude_filters = new_filters

    @classmethod
    def _get_default_library_roots(cls):
        # Builds the default set of directories considered "library code":
        # stdlib/site-packages paths from sysconfig/site plus their realpaths.
        pydev_log.debug("Collecting default library roots.")
        # Provide sensible defaults if not in env vars.
        import site

        roots = []

        try:
            import sysconfig  # Python 2.7 onwards only.
        except ImportError:
            pass
        else:
            for path_name in set(("stdlib", "platstdlib", "purelib", "platlib")) & set(sysconfig.get_path_names()):
                roots.append(sysconfig.get_path(path_name))

        # Make sure we always get at least the standard library location (based on the `os` and
        # `threading` modules -- it's a bit weird that it may be different on the ci, but it happens).
        roots.append(os.path.dirname(os.__file__))
        roots.append(os.path.dirname(threading.__file__))
        if IS_PYPY:
            # On PyPy 3.6 (7.3.1) it wrongly says that sysconfig.get_path('stdlib') is
            # <install>/lib-pypy when the installed version is <install>/lib_pypy.
            try:
                import _pypy_wait
            except ImportError:
                pydev_log.debug("Unable to import _pypy_wait on PyPy when collecting default library roots.")
            else:
                pypy_lib_dir = os.path.dirname(_pypy_wait.__file__)
                pydev_log.debug("Adding %s to default library roots.", pypy_lib_dir)
                roots.append(pypy_lib_dir)

        # site.getusersitepackages/getsitepackages may return a single path or
        # a sequence depending on platform -- handle both.
        if hasattr(site, "getusersitepackages"):
            site_paths = site.getusersitepackages()
            if isinstance(site_paths, (list, tuple)):
                for site_path in site_paths:
                    roots.append(site_path)
            else:
                roots.append(site_paths)

        if hasattr(site, "getsitepackages"):
            site_paths = site.getsitepackages()
            if isinstance(site_paths, (list, tuple)):
                for site_path in site_paths:
                    roots.append(site_path)
            else:
                roots.append(site_paths)

        for path in sys.path:
            if os.path.exists(path) and os.path.basename(path) in ("site-packages", "pip-global"):
                roots.append(path)

        # On WASM some of the roots may not exist, filter those out.
        roots = [path for path in roots if path is not None]
        # Also add the resolved (symlink-free) form of every root.
        roots.extend([os.path.realpath(path) for path in roots])

        return sorted(set(roots))

    def _fix_roots(self, roots):
        # Normalizes each root to an absolute path ending in a separator so
        # that startswith() checks only match whole directory components.
        roots = _convert_to_str_and_clear_empty(roots)
        new_roots = []
        for root in roots:
            path = self._absolute_normalized_path(root)
            if pydevd_constants.IS_WINDOWS:
                new_roots.append(path + "\\")
            else:
                new_roots.append(path + "/")
        return new_roots

    def _absolute_normalized_path(self, filename):
        """
        Provides a version of the filename that's absolute and normalized.
        """
        return normcase(pydevd_file_utils.absolute_path(filename))

    def set_project_roots(self, project_roots):
        self._project_roots = self._fix_roots(project_roots)
        pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % project_roots)

    def _get_project_roots(self):
        return self._project_roots

    def set_library_roots(self, roots):
        self._library_roots = self._fix_roots(roots)
        pydev_log.debug("LIBRARY_ROOTS %s\n" % roots)

    def _get_library_roots(self):
        return self._library_roots

    def in_project_roots(self, received_filename):
        """
        Note: don't call directly. Use PyDb.in_project_scope (there's no caching here and it doesn't
        handle all possibilities for knowing whether a project is actually in the scope, it
        just handles the heuristics based on the absolute_normalized_filename without the actual frame).
        """
        DEBUG = False

        # Special pseudo-filenames (e.g. '<string>'-style entries) are decided
        # by prefix alone, before any path normalization.
        if received_filename.startswith(USER_CODE_BASENAMES_STARTING_WITH):
            if DEBUG:
                pydev_log.debug(
                    "In in_project_roots - user basenames - starts with %s (%s)", received_filename, USER_CODE_BASENAMES_STARTING_WITH
                )
            return True

        if received_filename.startswith(LIBRARY_CODE_BASENAMES_STARTING_WITH):
            if DEBUG:
                pydev_log.debug(
                    "Not in in_project_roots - library basenames - starts with %s (%s)",
                    received_filename,
                    LIBRARY_CODE_BASENAMES_STARTING_WITH,
                )
            return False

        project_roots = self._get_project_roots()  # roots are absolute/normalized.

        absolute_normalized_filename = self._absolute_normalized_path(received_filename)
        # The dir form handles a filename that IS a configured root itself.
        absolute_normalized_filename_as_dir = absolute_normalized_filename + ("\\" if IS_WINDOWS else "/")

        # Collect every root (project and library) that contains the file.
        found_in_project = []
        for root in project_roots:
            if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir):
                if DEBUG:
                    pydev_log.debug("In project: %s (%s)", absolute_normalized_filename, root)
                found_in_project.append(root)

        found_in_library = []
        library_roots = self._get_library_roots()
        for root in library_roots:
            if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir):
                found_in_library.append(root)
                if DEBUG:
                    pydev_log.debug("In library: %s (%s)", absolute_normalized_filename, root)
            else:
                if DEBUG:
                    pydev_log.debug("Not in library: %s (%s)", absolute_normalized_filename, root)

        if not project_roots:
            # If we have no project roots configured, consider it being in the project
            # roots if it's not found in site-packages (because we have defaults for those
            # and not the other way around).
            in_project = not found_in_library
            if DEBUG:
                pydev_log.debug("Final in project (no project roots): %s (%s)", absolute_normalized_filename, in_project)

        else:
            in_project = False
            if found_in_project:
                if not found_in_library:
                    if DEBUG:
                        pydev_log.debug("Final in project (in_project and not found_in_library): %s (True)", absolute_normalized_filename)
                    in_project = True
                else:
                    # Found in both, let's see which one has the bigger path matched.
                    # The longer (more specific) root wins.
                    if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library):
                        in_project = True
                    if DEBUG:
                        pydev_log.debug("Final in project (found in both): %s (%s)", absolute_normalized_filename, in_project)

        return in_project

    def use_libraries_filter(self):
        """
        Should we debug only what's inside project folders?
        """
        return self._use_libraries_filter

    def set_use_libraries_filter(self, use):
        pydev_log.debug("pydevd: Use libraries filter: %s\n" % use)
        self._use_libraries_filter = use

    def use_exclude_filters(self):
        # Enabled if we have any filters registered.
        return len(self._exclude_filters) > 0

    def exclude_by_filter(self, absolute_filename, module_name):
        """
        :return: True if it should be excluded, False if it should be included and None
        if no rule matched the given file.
        """
        # First matching filter wins; path filters match the filename, module
        # filters match the module name or any of its submodules.
        for exclude_filter in self._exclude_filters:  # : :type exclude_filter: ExcludeFilter
            if exclude_filter.is_path:
                if glob_matches_path(absolute_filename, exclude_filter.name):
                    return exclude_filter.exclude
            else:
                # Module filter.
                if exclude_filter.name == module_name or module_name.startswith(exclude_filter.name + "."):
                    return exclude_filter.exclude
        return None

    def set_exclude_filters(self, exclude_filters):
        """
        :param list(ExcludeFilter) exclude_filters:
        """
        self._exclude_filters = exclude_filters
        # require_module tells callers whether a module name must be computed
        # before exclude_by_filter can be evaluated.
        self.require_module = False
        for exclude_filter in exclude_filters:
            if not exclude_filter.is_path:
                self.require_module = True
                break
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_filtering.py
|
pydevd_filtering.py
|
Python
| 13,331 | 0.95 | 0.289941 | 0.078571 |
python-kit
| 687 |
2024-11-08T06:28:44.801123
|
MIT
| false |
1813415f80d249d89accb8f0fc7bd6d9
|
# Note: code gotten from _pydev_imports_tipper.

import sys


def _imp(name, log=None):
    """Import *name*, falling back to successively shorter dotted prefixes.

    Returns the top-level module object on success. When nothing along the
    dotted chain can be imported, raises ImportError (optionally recording
    details on *log* first).
    """
    try:
        return __import__(name)
    except:
        if "." not in name:
            # Nothing left to strip: give up with a descriptive error.
            msg = "Unable to import module: %s - sys.path: %s" % (str(name), sys.path)
            if log is not None:
                log.add_content(msg)
                log.add_exception()
            raise ImportError(msg)

        # Retry with the parent package (everything before the last dot).
        parent = name[0 : name.rfind(".")]
        if log is not None:
            log.add_content("Unable to import", name, "trying with", parent)
            log.add_exception()
        return _imp(parent, log)


IS_IPY = False
if sys.platform == "cli":
    IS_IPY = True
    _old_imp = _imp

    def _imp(name, log=None):
        # IronPython: a .NET assembly must be registered via clr before the
        # Python-level import can see it.
        import clr  # @UnresolvedImport

        full_name = name
        candidate = name
        while "." in candidate:
            try:
                clr.AddReference(candidate)
                break  # Registered successfully.
            except:
                candidate = candidate[0 : candidate.rfind(".")]
        else:
            # Loop finished without break: try the remaining bare name too.
            try:
                clr.AddReference(candidate)
            except:
                pass  # That's OK (not dot net module).

        return _old_imp(full_name, log)


def import_name(name, log=None):
    """Import dotted *name* and return the innermost module/attribute."""
    module = _imp(name, log)

    previous = None
    for part in name.split(".")[1:]:
        try:
            # This can legitimately fail in shadowing situations, e.g. with
            # mx.DateTime.mxDateTime.mxDateTime.pyd: after importing it,
            # mx.DateTime.mxDateTime shadows access to mxDateTime.pyd.
            module = getattr(module, part)
        except AttributeError:
            # Tolerate one repeated trailing component (the shadowing case);
            # re-raise otherwise.
            if previous != part:
                raise

        previous = part

    return module
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_import_class.py
|
pydevd_import_class.py
|
Python
| 1,849 | 0.95 | 0.214286 | 0.09434 |
python-kit
| 169 |
2024-05-26T00:39:52.185678
|
GPL-3.0
| false |
deafdc8be7f789841ddce6cfcf6fb700
|
from _pydevd_bundle.pydevd_constants import ForkSafeLock, get_global_debugger
import os
import sys
from contextlib import contextmanager


class IORedirector:
    """
    This class works to wrap a stream (stdout/stderr) with an additional redirect.

    Writes are fanned out to both wrapped streams; unknown attribute access is
    delegated to the first wrapped stream that provides the attribute.
    """

    def __init__(self, original, new_redirect, wrap_buffer=False):
        """
        :param stream original:
            The stream to be wrapped (usually stdout/stderr, but could be None).

        :param stream new_redirect:
            Usually IOBuf (below).

        :param bool wrap_buffer:
            Whether to create a buffer attribute (needed to mimic the Python 3
            stdout/stderr which has a buffer to write binary data).
        """
        # Reentrant + fork-safe: write() takes this lock and may be re-entered
        # on the same thread if one of the targets itself prints.
        self._lock = ForkSafeLock(rlock=True)
        self._writing = False
        self._redirect_to = (original, new_redirect)
        if wrap_buffer and hasattr(original, "buffer"):
            self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)

    def write(self, s):
        # Note that writing to the original stream may fail for some reasons
        # (such as trying to write something that's not a string or having it closed).
        with self._lock:
            if self._writing:
                # Recursive write on the same thread (a target's write printed
                # in turn): drop it to avoid infinite recursion.
                return
            self._writing = True
            try:
                for r in self._redirect_to:
                    if hasattr(r, "write"):
                        r.write(s)
            finally:
                self._writing = False

    def isatty(self):
        # Delegates to the first wrapped stream that implements isatty().
        for r in self._redirect_to:
            if hasattr(r, "isatty"):
                return r.isatty()
        return False

    def flush(self):
        # Flushes every wrapped stream that supports it.
        for r in self._redirect_to:
            if hasattr(r, "flush"):
                r.flush()

    def __getattr__(self, name):
        # Any other attribute comes from the first wrapped stream having it.
        for r in self._redirect_to:
            if hasattr(r, name):
                return getattr(r, name)
        raise AttributeError(name)


class RedirectToPyDBIoMessages(object):
    """File-like object that forwards writes to the debugger as io messages
    (or to a custom callback), instead of/in addition to the real stream."""

    def __init__(self, out_ctx, wrap_stream, wrap_buffer, on_write=None):
        """
        :param out_ctx:
            1=stdout and 2=stderr

        :param wrap_stream:
            Either sys.stdout or sys.stderr.

        :param bool wrap_buffer:
            If True the buffer attribute (which wraps writing bytes) should be
            wrapped.

        :param callable(str) on_write:
            May be a custom callable to be called when to write something.
            If not passed the default implementation will create an io message
            and send it through the debugger.
        """
        # Decode bytes with the stream's encoding, falling back to the
        # PYTHONIOENCODING env var and finally utf-8.
        encoding = getattr(wrap_stream, "encoding", None)
        if not encoding:
            encoding = os.environ.get("PYTHONIOENCODING", "utf-8")
        self.encoding = encoding
        self._out_ctx = out_ctx
        if wrap_buffer:
            self.buffer = RedirectToPyDBIoMessages(out_ctx, wrap_stream, wrap_buffer=False, on_write=on_write)
        self._on_write = on_write

    def get_pydb(self):
        # Note: separate method for mocking on tests.
        return get_global_debugger()

    def flush(self):
        pass  # no-op here

    def write(self, s):
        # Custom callback takes precedence over sending an io message.
        if self._on_write is not None:
            self._on_write(s)
            return

        if s:
            # Need s in str
            if isinstance(s, bytes):
                s = s.decode(self.encoding, errors="replace")

            py_db = self.get_pydb()
            if py_db is not None:
                # Note that the actual message contents will be a xml with utf-8, although
                # the entry is str on py3 and bytes on py2.
                cmd = py_db.cmd_factory.make_io_message(s, self._out_ctx)
                if py_db.writer is not None:
                    py_db.writer.add_command(cmd)


class IOBuf:
    """This class works as a replacement for stdio and stderr.
    It is a buffer and when its contents are requested, it will erase what
    it has so far so that the next return will not return the same contents again.
    """

    def __init__(self):
        self.buflist = []
        import os  # redundant with the module-level import, kept as-is

        self.encoding = os.environ.get("PYTHONIOENCODING", "utf-8")

    def getvalue(self):
        # Returns everything accumulated so far and resets the buffer.
        b = self.buflist
        self.buflist = []  # clear it
        return "".join(b)  # bytes on py2, str on py3.

    def write(self, s):
        # Bytes are decoded on the way in so the buffer holds only str.
        if isinstance(s, bytes):
            s = s.decode(self.encoding, errors="replace")
        self.buflist.append(s)

    def isatty(self):
        return False

    def flush(self):
        pass

    def empty(self):
        return len(self.buflist) == 0


class _RedirectInfo(object):
    # Pairs the stream that was replaced with the stream that replaced it.
    def __init__(self, original, redirect_to):
        self.original = original
        self.redirect_to = redirect_to


class _RedirectionsHolder:
    # Module-wide state for the redirection stacks; all mutation happens
    # under _lock.
    _lock = ForkSafeLock(rlock=True)
    _stack_stdout = []
    _stack_stderr = []

    # Active RedirectToPyDBIoMessages _RedirectInfo per stream (or None).
    _pydevd_stdout_redirect_ = None
    _pydevd_stderr_redirect_ = None


def start_redirect(keep_original_redirection=False, std="stdout", redirect_to=None):
    """
    Replace sys.stdout/sys.stderr with *redirect_to* (an IOBuf by default),
    recording the previous stream on a stack so end_redirect can restore it.

    @param std: 'stdout', 'stderr', or 'both'
    """
    with _RedirectionsHolder._lock:
        if redirect_to is None:
            redirect_to = IOBuf()

        if std == "both":
            config_stds = ["stdout", "stderr"]
        else:
            config_stds = [std]

        for std in config_stds:  # note: deliberately shadows the parameter
            original = getattr(sys, std)
            stack = getattr(_RedirectionsHolder, "_stack_%s" % std)

            if keep_original_redirection:
                # Fan out to both the original stream and redirect_to.
                wrap_buffer = True if hasattr(redirect_to, "buffer") else False
                new_std_instance = IORedirector(getattr(sys, std), redirect_to, wrap_buffer=wrap_buffer)
                setattr(sys, std, new_std_instance)
            else:
                new_std_instance = redirect_to
                setattr(sys, std, redirect_to)

            stack.append(_RedirectInfo(original, new_std_instance))

        return redirect_to


def end_redirect(std="stdout"):
    # Pops the most recent redirection for the given stream(s) and restores
    # the stream that was active before it.
    with _RedirectionsHolder._lock:
        if std == "both":
            config_stds = ["stdout", "stderr"]
        else:
            config_stds = [std]
        for std in config_stds:
            stack = getattr(_RedirectionsHolder, "_stack_%s" % std)
            redirect_info = stack.pop()
            setattr(sys, std, redirect_info.original)


def redirect_stream_to_pydb_io_messages(std):
    """
    Start forwarding the given stream to the debugger as io messages.

    Returns True if the redirection was installed, False if it was already
    active for this stream.

    :param std:
        'stdout' or 'stderr'
    """
    with _RedirectionsHolder._lock:
        redirect_to_name = "_pydevd_%s_redirect_" % (std,)
        if getattr(_RedirectionsHolder, redirect_to_name) is None:
            wrap_buffer = True
            original = getattr(sys, std)

            redirect_to = RedirectToPyDBIoMessages(1 if std == "stdout" else 2, original, wrap_buffer)
            start_redirect(keep_original_redirection=True, std=std, redirect_to=redirect_to)

            # Remember the _RedirectInfo just pushed so the stop call can
            # find and undo exactly this redirection.
            stack = getattr(_RedirectionsHolder, "_stack_%s" % std)
            setattr(_RedirectionsHolder, redirect_to_name, stack[-1])
            return True

        return False


def stop_redirect_stream_to_pydb_io_messages(std):
    """
    Undo redirect_stream_to_pydb_io_messages for the given stream.

    :param std:
        'stdout' or 'stderr'
    """
    with _RedirectionsHolder._lock:
        redirect_to_name = "_pydevd_%s_redirect_" % (std,)
        redirect_info = getattr(_RedirectionsHolder, redirect_to_name)
        if redirect_info is not None:  # :type redirect_info: _RedirectInfo
            setattr(_RedirectionsHolder, redirect_to_name, None)

            stack = getattr(_RedirectionsHolder, "_stack_%s" % std)
            # Popped (and discarded) to keep the stack balanced with the
            # start_redirect call made when the redirection was installed.
            prev_info = stack.pop()

            curr = getattr(sys, std)
            # Only restore if our redirector is still the active stream (some
            # other code may have replaced it in the meantime).
            if curr is redirect_info.redirect_to:
                setattr(sys, std, redirect_info.original)


@contextmanager
def redirect_stream_to_pydb_io_messages_context():
    # Context manager form: installs the io-message redirection for both
    # streams on entry (tracking which ones it actually installed) and
    # removes only those on exit.
    with _RedirectionsHolder._lock:
        redirecting = []
        for std in ("stdout", "stderr"):
            if redirect_stream_to_pydb_io_messages(std):
                redirecting.append(std)

        try:
            yield
        finally:
            for std in redirecting:
                stop_redirect_stream_to_pydb_io_messages(std)
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_io.py
|
pydevd_io.py
|
Python
| 8,371 | 0.95 | 0.253906 | 0.029703 |
node-utils
| 429 |
2024-02-29T16:38:27.539588
|
BSD-3-Clause
| false |
0054c4f79079b2768bb5ce46a1b5a8c5
|
from _pydevd_bundle.pydevd_constants import (
    DebugInfoHolder,
    get_global_debugger,
    GetGlobalDebugger,
    set_global_debugger,
)  # Keep for backward compatibility @UnusedImport
from _pydevd_bundle.pydevd_utils import quote_smart as quote, to_string
from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXIT
from _pydevd_bundle.pydevd_constants import HTTP_PROTOCOL, HTTP_JSON_PROTOCOL, get_protocol, IS_JYTHON, ForkSafeLock
import json
from _pydev_bundle import pydev_log


class _BaseNetCommand(object):
    """No-op base for network commands; real sending happens in NetCommand."""

    # Command id. Should be set in instance.
    id = -1

    # Dict representation of the command to be set in instance. Only set for json commands.
    as_dict = None

    def send(self, *args, **kwargs):
        pass

    def call_after_send(self, callback):
        pass


class _NullNetCommand(_BaseNetCommand):
    # Placeholder command: accepted by the writer but never sent.
    pass


class _NullExitCommand(_NullNetCommand):
    # Like _NullNetCommand, but identifiable as an exit request via its id.
    id = CMD_EXIT


# Constant meant to be passed to the writer when the command is meant to be ignored.
NULL_NET_COMMAND = _NullNetCommand()

# Exit command -- only internal (we don't want/need to send this to the IDE).
NULL_EXIT_COMMAND = _NullExitCommand()


class NetCommand(_BaseNetCommand):
    """
    Commands received/sent over the network.

    Command can represent command received from the debugger,
    or one to be sent by daemon.

    The wire representation is computed eagerly in __init__ (self._as_bytes)
    according to the active protocol; send() only writes those bytes.
    """

    next_seq = 0  # sequence numbers

    _showing_debug_info = 0
    _show_debug_info_lock = ForkSafeLock(rlock=True)

    # Optional list of callbacks invoked with the socket after a send.
    _after_send = None

    def __init__(self, cmd_id, seq, text, is_json=False):
        """
        If sequence is 0, new sequence will be generated (otherwise, this was the response
        to a command from the client).
        """
        protocol = get_protocol()
        self.id = cmd_id
        if seq == 0:
            # Locally-generated sequences advance by 2 -- presumably so they
            # occupy a different parity than client-generated ones (confirm
            # against the client implementation).
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq

        self.seq = seq

        if is_json:
            # json commands carry a dict payload (or an object convertible to
            # one); cmd_id/seq are embedded into the payload itself.
            if hasattr(text, "to_dict"):
                as_dict = text.to_dict(update_ids_to_dap=True)
            else:
                assert isinstance(text, dict)
                as_dict = text
            as_dict["pydevd_cmd_id"] = cmd_id
            as_dict["seq"] = seq
            self.as_dict = as_dict
            try:
                text = json.dumps(as_dict)
            except TypeError:
                # Fall back to stringifying non-serializable values.
                text = json.dumps(as_dict, default=str)

        assert isinstance(text, str)

        if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
            self._show_debug_info(cmd_id, seq, text)

        if is_json:
            msg = text
        else:
            if protocol not in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
                # Plain xml protocol: url-quote the payload and terminate the
                # tab-separated line with a newline.
                encoded = quote(to_string(text), '/<>_=" \t')
                msg = "%s\t%s\t%s\n" % (cmd_id, seq, encoded)

            else:
                # HTTP framing (Content-Length header added in send()): no
                # quoting/newline needed.
                msg = "%s\t%s\t%s" % (cmd_id, seq, text)

        if isinstance(msg, str):
            msg = msg.encode("utf-8")

        assert isinstance(msg, bytes)
        as_bytes = msg
        self._as_bytes = as_bytes

    def send(self, sock):
        """Write the pre-encoded command to *sock* (with HTTP framing when the
        protocol requires it) and run any after-send callbacks."""
        as_bytes = self._as_bytes
        try:
            if get_protocol() in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
                sock.sendall(("Content-Length: %s\r\n\r\n" % len(as_bytes)).encode("ascii"))
            sock.sendall(as_bytes)
            if self._after_send:
                for method in self._after_send:
                    method(sock)
        except:
            if IS_JYTHON:
                # Ignore errors in sock.sendall in Jython (seems to be common for Jython to
                # give spurious exceptions at interpreter shutdown here).
                pass
            else:
                raise
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_net_command.py
|
pydevd_net_command.py
|
Python
| 4,852 | 0.95 | 0.2 | 0.07563 |
react-lib
| 31 |
2023-08-17T17:12:50.051729
|
Apache-2.0
| false |
75f14cf646dc8c5911997ed8dd182f2f
|
from functools import partial\nimport itertools\nimport os\nimport sys\nimport socket as socket_module\n\nfrom _pydev_bundle._pydev_imports_tipper import TYPE_IMPORT, TYPE_CLASS, TYPE_FUNCTION, TYPE_ATTR, TYPE_BUILTIN, TYPE_PARAM\nfrom _pydev_bundle.pydev_is_thread_alive import is_thread_alive\nfrom _pydev_bundle.pydev_override import overrides\nfrom _pydevd_bundle._debug_adapter import pydevd_schema\nfrom _pydevd_bundle._debug_adapter.pydevd_schema import (\n ModuleEvent,\n ModuleEventBody,\n Module,\n OutputEventBody,\n OutputEvent,\n ContinuedEventBody,\n ExitedEventBody,\n ExitedEvent,\n)\nfrom _pydevd_bundle.pydevd_comm_constants import (\n CMD_THREAD_CREATE,\n CMD_RETURN,\n CMD_MODULE_EVENT,\n CMD_WRITE_TO_CONSOLE,\n CMD_STEP_INTO,\n CMD_STEP_INTO_MY_CODE,\n CMD_STEP_OVER,\n CMD_STEP_OVER_MY_CODE,\n CMD_STEP_RETURN,\n CMD_STEP_CAUGHT_EXCEPTION,\n CMD_ADD_EXCEPTION_BREAK,\n CMD_SET_BREAK,\n CMD_SET_NEXT_STATEMENT,\n CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION,\n CMD_THREAD_RESUME_SINGLE_NOTIFICATION,\n CMD_THREAD_KILL,\n CMD_STOP_ON_START,\n CMD_INPUT_REQUESTED,\n CMD_EXIT,\n CMD_STEP_INTO_COROUTINE,\n CMD_STEP_RETURN_MY_CODE,\n CMD_SMART_STEP_INTO,\n CMD_SET_FUNCTION_BREAK,\n CMD_THREAD_RUN,\n)\nfrom _pydevd_bundle.pydevd_constants import get_thread_id, ForkSafeLock, DebugInfoHolder\nfrom _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND\nfrom _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory\nfrom _pydevd_bundle.pydevd_utils import get_non_pydevd_threads\nimport pydevd_file_utils\nfrom _pydevd_bundle.pydevd_comm import build_exception_info_response\nfrom _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info\nfrom _pydevd_bundle import pydevd_frame_utils, pydevd_constants, pydevd_utils\nimport linecache\nfrom io import StringIO\nfrom _pydev_bundle import pydev_log\n\n\nclass ModulesManager(object):\n def __init__(self):\n self._lock = ForkSafeLock()\n self._modules = {}\n self._next_id = 
partial(next, itertools.count(0))\n\n def track_module(self, filename_in_utf8, module_name, frame):\n """\n :return list(NetCommand):\n Returns a list with the module events to be sent.\n """\n if filename_in_utf8 in self._modules:\n return []\n\n module_events = []\n with self._lock:\n # Must check again after getting the lock.\n if filename_in_utf8 in self._modules:\n return\n\n try:\n version = str(frame.f_globals.get("__version__", ""))\n except:\n version = "<unknown>"\n\n try:\n package_name = str(frame.f_globals.get("__package__", ""))\n except:\n package_name = "<unknown>"\n\n module_id = self._next_id()\n\n module = Module(module_id, module_name, filename_in_utf8)\n if version:\n module.version = version\n\n if package_name:\n # Note: package doesn't appear in the docs but seems to be expected?\n module.kwargs["package"] = package_name\n\n module_event = ModuleEvent(ModuleEventBody("new", module))\n\n module_events.append(NetCommand(CMD_MODULE_EVENT, 0, module_event, is_json=True))\n\n self._modules[filename_in_utf8] = module.to_dict()\n return module_events\n\n def get_modules_info(self):\n """\n :return list(Module)\n """\n with self._lock:\n return list(self._modules.values())\n\n\nclass NetCommandFactoryJson(NetCommandFactory):\n """\n Factory for commands which will provide messages as json (they should be\n similar to the debug adapter where possible, although some differences\n are currently Ok).\n\n Note that it currently overrides the xml version so that messages\n can be done one at a time (any message not overridden will currently\n use the xml version) -- after having all messages handled, it should\n no longer use NetCommandFactory as the base class.\n """\n\n def __init__(self):\n NetCommandFactory.__init__(self)\n self.modules_manager = ModulesManager()\n\n @overrides(NetCommandFactory.make_version_message)\n def make_version_message(self, seq):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n 
@overrides(NetCommandFactory.make_protocol_set_message)\n def make_protocol_set_message(self, seq):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n @overrides(NetCommandFactory.make_thread_created_message)\n def make_thread_created_message(self, thread):\n # Note: the thread id for the debug adapter must be an int\n # (make the actual id from get_thread_id respect that later on).\n msg = pydevd_schema.ThreadEvent(\n pydevd_schema.ThreadEventBody("started", get_thread_id(thread)),\n )\n\n return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)\n\n @overrides(NetCommandFactory.make_custom_frame_created_message)\n def make_custom_frame_created_message(self, frame_id, frame_description):\n self._additional_thread_id_to_thread_name[frame_id] = frame_description\n msg = pydevd_schema.ThreadEvent(\n pydevd_schema.ThreadEventBody("started", frame_id),\n )\n\n return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)\n\n @overrides(NetCommandFactory.make_thread_killed_message)\n def make_thread_killed_message(self, tid):\n self._additional_thread_id_to_thread_name.pop(tid, None)\n msg = pydevd_schema.ThreadEvent(\n pydevd_schema.ThreadEventBody("exited", tid),\n )\n\n return NetCommand(CMD_THREAD_KILL, 0, msg, is_json=True)\n\n @overrides(NetCommandFactory.make_list_threads_message)\n def make_list_threads_message(self, py_db, seq):\n threads = []\n for thread in get_non_pydevd_threads():\n if is_thread_alive(thread):\n thread_id = get_thread_id(thread)\n\n # Notify that it's created (no-op if we already notified before).\n py_db.notify_thread_created(thread_id, thread)\n\n thread_schema = pydevd_schema.Thread(id=thread_id, name=thread.name)\n threads.append(thread_schema.to_dict())\n\n for thread_id, thread_name in list(self._additional_thread_id_to_thread_name.items()):\n thread_schema = pydevd_schema.Thread(id=thread_id, name=thread_name)\n threads.append(thread_schema.to_dict())\n\n body = pydevd_schema.ThreadsResponseBody(threads)\n 
response = pydevd_schema.ThreadsResponse(request_seq=seq, success=True, command="threads", body=body)\n\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n @overrides(NetCommandFactory.make_get_completions_message)\n def make_get_completions_message(self, seq, completions, qualifier, start):\n COMPLETION_TYPE_LOOK_UP = {\n TYPE_IMPORT: pydevd_schema.CompletionItemType.MODULE,\n TYPE_CLASS: pydevd_schema.CompletionItemType.CLASS,\n TYPE_FUNCTION: pydevd_schema.CompletionItemType.FUNCTION,\n TYPE_ATTR: pydevd_schema.CompletionItemType.FIELD,\n TYPE_BUILTIN: pydevd_schema.CompletionItemType.KEYWORD,\n TYPE_PARAM: pydevd_schema.CompletionItemType.VARIABLE,\n }\n\n qualifier = qualifier.lower()\n qualifier_len = len(qualifier)\n targets = []\n for completion in completions:\n label = completion[0]\n if label.lower().startswith(qualifier):\n completion = pydevd_schema.CompletionItem(\n label=label, type=COMPLETION_TYPE_LOOK_UP[completion[3]], start=start, length=qualifier_len\n )\n targets.append(completion.to_dict())\n\n body = pydevd_schema.CompletionsResponseBody(targets)\n response = pydevd_schema.CompletionsResponse(request_seq=seq, success=True, command="completions", body=body)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def _format_frame_name(self, fmt, initial_name, module_name, line, path):\n if fmt is None:\n return initial_name\n frame_name = initial_name\n if fmt.get("module", False):\n if module_name:\n if initial_name == "<module>":\n frame_name = module_name\n else:\n frame_name = "%s.%s" % (module_name, initial_name)\n else:\n basename = os.path.basename(path)\n basename = basename[0:-3] if basename.lower().endswith(".py") else basename\n if initial_name == "<module>":\n frame_name = "%s in %s" % (initial_name, basename)\n else:\n frame_name = "%s.%s" % (basename, initial_name)\n\n if fmt.get("line", False):\n frame_name = "%s : %d" % (frame_name, line)\n\n return frame_name\n\n 
@overrides(NetCommandFactory.make_get_thread_stack_message)\n def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):\n frames = []\n module_events = []\n\n try:\n # : :type suspended_frames_manager: SuspendedFramesManager\n suspended_frames_manager = py_db.suspended_frames_manager\n frames_list = suspended_frames_manager.get_frames_list(thread_id)\n if frames_list is None:\n # Could not find stack of suspended frame...\n if must_be_suspended:\n return None\n else:\n frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)\n\n for (\n frame_id,\n frame,\n method_name,\n original_filename,\n filename_in_utf8,\n lineno,\n applied_mapping,\n show_as_current_frame,\n line_col_info,\n ) in self._iter_visible_frames_info(py_db, frames_list, flatten_chained=True):\n try:\n module_name = str(frame.f_globals.get("__name__", ""))\n except:\n module_name = "<unknown>"\n\n module_events.extend(self.modules_manager.track_module(filename_in_utf8, module_name, frame))\n\n presentation_hint = None\n if not getattr(frame, "IS_PLUGIN_FRAME", False): # Never filter out plugin frames!\n if py_db.is_files_filter_enabled and py_db.apply_files_filter(frame, original_filename, False):\n continue\n\n if not py_db.in_project_scope(frame):\n presentation_hint = "subtle"\n\n formatted_name = self._format_frame_name(fmt, method_name, module_name, lineno, filename_in_utf8)\n if show_as_current_frame:\n formatted_name += " (Current frame)"\n source_reference = pydevd_file_utils.get_client_filename_source_reference(filename_in_utf8)\n\n if not source_reference and not applied_mapping and not os.path.exists(original_filename):\n if getattr(frame.f_code, "co_lines", None) or getattr(frame.f_code, "co_lnotab", None):\n # Create a source-reference to be used where we provide the source by decompiling the code.\n # Note: When the time comes to retrieve the source reference in this case, we'll\n # check 
the linecache first (see: get_decompiled_source_from_frame_id).\n source_reference = pydevd_file_utils.create_source_reference_for_frame_id(frame_id, original_filename)\n else:\n # Check if someone added a source reference to the linecache (Python attrs does this).\n if linecache.getline(original_filename, 1):\n source_reference = pydevd_file_utils.create_source_reference_for_linecache(original_filename)\n\n column = 1\n endcol = None\n if line_col_info is not None:\n try:\n line_text = linecache.getline(original_filename, lineno)\n except:\n if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 2:\n pydev_log.exception("Unable to get line from linecache for file: %s", original_filename)\n else:\n if line_text:\n colno, endcolno = line_col_info.map_columns_to_line(line_text)\n column = colno + 1\n if line_col_info.lineno == line_col_info.end_lineno:\n endcol = endcolno + 1\n\n frames.append(\n pydevd_schema.StackFrame(\n frame_id,\n formatted_name,\n lineno,\n column=column,\n endColumn=endcol,\n source={\n "path": filename_in_utf8,\n "sourceReference": source_reference,\n },\n presentationHint=presentation_hint,\n ).to_dict()\n )\n finally:\n topmost_frame = None\n\n for module_event in module_events:\n py_db.writer.add_command(module_event)\n\n total_frames = len(frames)\n stack_frames = frames\n if bool(levels):\n start = start_frame\n end = min(start + levels, total_frames)\n stack_frames = frames[start:end]\n\n response = pydevd_schema.StackTraceResponse(\n request_seq=seq,\n success=True,\n command="stackTrace",\n body=pydevd_schema.StackTraceResponseBody(stackFrames=stack_frames, totalFrames=total_frames),\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n @overrides(NetCommandFactory.make_warning_message)\n def make_warning_message(self, msg):\n category = "important"\n body = OutputEventBody(msg, category)\n event = OutputEvent(body)\n return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_io_message)\n def 
make_io_message(self, msg, ctx):\n category = "stdout" if int(ctx) == 1 else "stderr"\n body = OutputEventBody(msg, category)\n event = OutputEvent(body)\n return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_console_message)\n def make_console_message(self, msg):\n category = "console"\n body = OutputEventBody(msg, category)\n event = OutputEvent(body)\n return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)\n\n _STEP_REASONS = set(\n [\n CMD_STEP_INTO,\n CMD_STEP_INTO_MY_CODE,\n CMD_STEP_OVER,\n CMD_STEP_OVER_MY_CODE,\n CMD_STEP_RETURN,\n CMD_STEP_RETURN_MY_CODE,\n CMD_STEP_INTO_MY_CODE,\n CMD_STOP_ON_START,\n CMD_STEP_INTO_COROUTINE,\n CMD_SMART_STEP_INTO,\n ]\n )\n _EXCEPTION_REASONS = set(\n [\n CMD_STEP_CAUGHT_EXCEPTION,\n CMD_ADD_EXCEPTION_BREAK,\n ]\n )\n\n @overrides(NetCommandFactory.make_thread_suspend_single_notification)\n def make_thread_suspend_single_notification(self, py_db, thread_id, thread, stop_reason):\n exc_desc = None\n exc_name = None\n info = set_additional_thread_info(thread)\n\n preserve_focus_hint = False\n if stop_reason in self._STEP_REASONS:\n if info.pydev_original_step_cmd == CMD_STOP_ON_START:\n # Just to make sure that's not set as the original reason anymore.\n info.pydev_original_step_cmd = -1\n stop_reason = "entry"\n else:\n stop_reason = "step"\n elif stop_reason in self._EXCEPTION_REASONS:\n stop_reason = "exception"\n elif stop_reason == CMD_SET_BREAK:\n stop_reason = "breakpoint"\n elif stop_reason == CMD_SET_FUNCTION_BREAK:\n stop_reason = "function breakpoint"\n elif stop_reason == CMD_SET_NEXT_STATEMENT:\n stop_reason = "goto"\n else:\n stop_reason = "pause"\n preserve_focus_hint = True\n\n if stop_reason == "exception":\n exception_info_response = build_exception_info_response(\n py_db, thread_id, thread, -1, set_additional_thread_info, self._iter_visible_frames_info, max_frames=-1\n )\n exception_info_response\n\n exc_name = 
exception_info_response.body.exceptionId\n exc_desc = exception_info_response.body.description\n\n body = pydevd_schema.StoppedEventBody(\n reason=stop_reason,\n description=exc_desc,\n threadId=thread_id,\n text=exc_name,\n allThreadsStopped=True,\n preserveFocusHint=preserve_focus_hint,\n )\n event = pydevd_schema.StoppedEvent(body)\n return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_thread_resume_single_notification)\n def make_thread_resume_single_notification(self, thread_id):\n body = ContinuedEventBody(threadId=thread_id, allThreadsContinued=True)\n event = pydevd_schema.ContinuedEvent(body)\n return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_set_next_stmnt_status_message)\n def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):\n response = pydevd_schema.GotoResponse(\n request_seq=int(seq), success=is_success, command="goto", body={}, message=(None if is_success else exception_msg)\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n @overrides(NetCommandFactory.make_send_curr_exception_trace_message)\n def make_send_curr_exception_trace_message(self, *args, **kwargs):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n @overrides(NetCommandFactory.make_send_curr_exception_trace_proceeded_message)\n def make_send_curr_exception_trace_proceeded_message(self, *args, **kwargs):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n @overrides(NetCommandFactory.make_send_breakpoint_exception_message)\n def make_send_breakpoint_exception_message(self, *args, **kwargs):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n @overrides(NetCommandFactory.make_process_created_message)\n def make_process_created_message(self, *args, **kwargs):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n 
@overrides(NetCommandFactory.make_process_about_to_be_replaced_message)\n def make_process_about_to_be_replaced_message(self):\n event = ExitedEvent(ExitedEventBody(-1, pydevdReason="processReplaced"))\n\n cmd = NetCommand(CMD_RETURN, 0, event, is_json=True)\n\n def after_send(socket):\n socket.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_NODELAY, 1)\n\n cmd.call_after_send(after_send)\n return cmd\n\n @overrides(NetCommandFactory.make_thread_suspend_message)\n def make_thread_suspend_message(self, py_db, thread_id, frames_list, stop_reason, message, trace_suspend_type, thread, info):\n from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_SUSPEND\n\n if py_db.multi_threads_single_notification:\n pydev_log.debug("Skipping per-thread thread suspend notification.")\n return NULL_NET_COMMAND # Don't send per-thread, send a single one.\n pydev_log.debug("Sending per-thread thread suspend notification (stop_reason: %s)", stop_reason)\n\n exc_desc = None\n exc_name = None\n preserve_focus_hint = False\n if stop_reason in self._STEP_REASONS:\n if info.pydev_original_step_cmd == CMD_STOP_ON_START:\n # Just to make sure that's not set as the original reason anymore.\n info.pydev_original_step_cmd = -1\n stop_reason = "entry"\n else:\n stop_reason = "step"\n elif stop_reason in self._EXCEPTION_REASONS:\n stop_reason = "exception"\n elif stop_reason == CMD_SET_BREAK:\n stop_reason = "breakpoint"\n elif stop_reason == CMD_SET_FUNCTION_BREAK:\n stop_reason = "function breakpoint"\n elif stop_reason == CMD_SET_NEXT_STATEMENT:\n stop_reason = "goto"\n else:\n stop_reason = "pause"\n preserve_focus_hint = True\n\n if stop_reason == "exception":\n exception_info_response = build_exception_info_response(\n py_db, thread_id, thread, -1, set_additional_thread_info, self._iter_visible_frames_info, max_frames=-1\n )\n exception_info_response\n\n exc_name = exception_info_response.body.exceptionId\n exc_desc = exception_info_response.body.description\n\n body = 
pydevd_schema.StoppedEventBody(\n reason=stop_reason,\n description=exc_desc,\n threadId=thread_id,\n text=exc_name,\n allThreadsStopped=False,\n preserveFocusHint=preserve_focus_hint,\n )\n event = pydevd_schema.StoppedEvent(body)\n return NetCommand(CMD_THREAD_SUSPEND, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_thread_run_message)\n def make_thread_run_message(self, py_db, thread_id, reason):\n if py_db.multi_threads_single_notification:\n return NULL_NET_COMMAND # Don't send per-thread, send a single one.\n body = ContinuedEventBody(threadId=thread_id, allThreadsContinued=False)\n event = pydevd_schema.ContinuedEvent(body)\n return NetCommand(CMD_THREAD_RUN, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_reloaded_code_message)\n def make_reloaded_code_message(self, *args, **kwargs):\n return NULL_NET_COMMAND # Not a part of the debug adapter protocol\n\n @overrides(NetCommandFactory.make_input_requested_message)\n def make_input_requested_message(self, started):\n event = pydevd_schema.PydevdInputRequestedEvent(body={})\n return NetCommand(CMD_INPUT_REQUESTED, 0, event, is_json=True)\n\n @overrides(NetCommandFactory.make_skipped_step_in_because_of_filters)\n def make_skipped_step_in_because_of_filters(self, py_db, frame):\n msg = "Frame skipped from debugging during step-in."\n if py_db.get_use_libraries_filter():\n msg += (\n '\nNote: may have been skipped because of "justMyCode" option (default == true). 
'\n 'Try setting "justMyCode": false in the debug configuration (e.g., launch.json).\n'\n )\n return self.make_warning_message(msg)\n\n @overrides(NetCommandFactory.make_evaluation_timeout_msg)\n def make_evaluation_timeout_msg(self, py_db, expression, curr_thread):\n msg = """Evaluating: %s did not finish after %.2f seconds.\nThis may mean a number of things:\n- This evaluation is really slow and this is expected.\n In this case it's possible to silence this error by raising the timeout, setting the\n PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.\n\n- The evaluation may need other threads running while it's running:\n In this case, it's possible to set the PYDEVD_UNBLOCK_THREADS_TIMEOUT\n environment variable so that if after a given timeout an evaluation doesn't finish,\n other threads are unblocked or you can manually resume all threads.\n\n Alternatively, it's also possible to skip breaking on a particular thread by setting a\n `pydev_do_not_trace = True` attribute in the related threading.Thread instance\n (if some thread should always be running and no breakpoints are expected to be hit in it).\n\n- The evaluation is deadlocked:\n In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT\n environment variable to true so that a thread dump is shown along with this message and\n optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger\n tries to interrupt the evaluation (if possible) when this happens.\n""" % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)\n\n if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:\n stream = StringIO()\n pydevd_utils.dump_threads(stream, show_pydevd_threads=False)\n msg += "\n\n%s\n" % stream.getvalue()\n return self.make_warning_message(msg)\n\n @overrides(NetCommandFactory.make_exit_command)\n def make_exit_command(self, py_db):\n event = pydevd_schema.TerminatedEvent(pydevd_schema.TerminatedEventBody())\n return 
NetCommand(CMD_EXIT, 0, event, is_json=True)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_net_command_factory_json.py
|
pydevd_net_command_factory_json.py
|
Python
| 25,782 | 0.95 | 0.162116 | 0.026 |
node-utils
| 934 |
2025-05-04T11:38:48.294816
|
BSD-3-Clause
| false |
ed541e9d2f1c5f4ebfb4543acbba74b8
|
import json\n\nfrom _pydev_bundle.pydev_is_thread_alive import is_thread_alive\nfrom _pydev_bundle._pydev_saved_modules import thread\nfrom _pydevd_bundle import pydevd_xml, pydevd_frame_utils, pydevd_constants, pydevd_utils\nfrom _pydevd_bundle.pydevd_comm_constants import (\n CMD_THREAD_CREATE,\n CMD_THREAD_KILL,\n CMD_THREAD_SUSPEND,\n CMD_THREAD_RUN,\n CMD_GET_VARIABLE,\n CMD_EVALUATE_EXPRESSION,\n CMD_GET_FRAME,\n CMD_WRITE_TO_CONSOLE,\n CMD_GET_COMPLETIONS,\n CMD_LOAD_SOURCE,\n CMD_SET_NEXT_STATEMENT,\n CMD_EXIT,\n CMD_GET_FILE_CONTENTS,\n CMD_EVALUATE_CONSOLE_EXPRESSION,\n CMD_RUN_CUSTOM_OPERATION,\n CMD_GET_BREAKPOINT_EXCEPTION,\n CMD_SEND_CURR_EXCEPTION_TRACE,\n CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED,\n CMD_SHOW_CONSOLE,\n CMD_GET_ARRAY,\n CMD_INPUT_REQUESTED,\n CMD_GET_DESCRIPTION,\n CMD_PROCESS_CREATED,\n CMD_SHOW_CYTHON_WARNING,\n CMD_LOAD_FULL_VALUE,\n CMD_GET_THREAD_STACK,\n CMD_GET_EXCEPTION_DETAILS,\n CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION,\n CMD_THREAD_RESUME_SINGLE_NOTIFICATION,\n CMD_GET_NEXT_STATEMENT_TARGETS,\n CMD_VERSION,\n CMD_RETURN,\n CMD_SET_PROTOCOL,\n CMD_ERROR,\n MAX_IO_MSG_SIZE,\n VERSION_STRING,\n CMD_RELOAD_CODE,\n CMD_LOAD_SOURCE_FROM_FRAME_ID,\n)\nfrom _pydevd_bundle.pydevd_constants import (\n DebugInfoHolder,\n get_thread_id,\n get_global_debugger,\n GetGlobalDebugger,\n set_global_debugger,\n) # Keep for backward compatibility @UnusedImport\nfrom _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND, NULL_EXIT_COMMAND\nfrom _pydevd_bundle.pydevd_utils import quote_smart as quote, get_non_pydevd_threads\nfrom pydevd_file_utils import get_abs_path_real_path_and_base_from_frame\nimport pydevd_file_utils\nfrom pydevd_tracing import get_exception_traceback_str\nfrom _pydev_bundle._pydev_completer import completions_to_xml\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_bundle.pydevd_frame_utils import FramesList\nfrom io import StringIO\n\n\n# 
=======================================================================================================================\n# NetCommandFactory\n# =======================================================================================================================\nclass NetCommandFactory(object):\n def __init__(self):\n self._additional_thread_id_to_thread_name = {}\n\n def _thread_to_xml(self, thread):\n """thread information as XML"""\n name = pydevd_xml.make_valid_xml_value(thread.name)\n cmd_text = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))\n return cmd_text\n\n def make_error_message(self, seq, text):\n cmd = NetCommand(CMD_ERROR, seq, text)\n if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:\n pydev_log.error("Error: %s" % (text,))\n return cmd\n\n def make_protocol_set_message(self, seq):\n return NetCommand(CMD_SET_PROTOCOL, seq, "")\n\n def make_thread_created_message(self, thread):\n cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"\n return NetCommand(CMD_THREAD_CREATE, 0, cmdText)\n\n def make_process_created_message(self):\n cmdText = "<process/>"\n return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)\n\n def make_process_about_to_be_replaced_message(self):\n return NULL_NET_COMMAND\n\n def make_show_cython_warning_message(self):\n try:\n return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, "")\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_custom_frame_created_message(self, frame_id, frame_description):\n self._additional_thread_id_to_thread_name[frame_id] = frame_description\n frame_description = pydevd_xml.make_valid_xml_value(frame_description)\n return NetCommand(CMD_THREAD_CREATE, 0, '<xml><thread name="%s" id="%s"/></xml>' % (frame_description, frame_id))\n\n def make_list_threads_message(self, py_db, seq):\n """returns thread listing as XML"""\n try:\n threads = get_non_pydevd_threads()\n cmd_text = ["<xml>"]\n append = cmd_text.append\n for thread in threads:\n if 
is_thread_alive(thread):\n append(self._thread_to_xml(thread))\n\n for thread_id, thread_name in list(self._additional_thread_id_to_thread_name.items()):\n name = pydevd_xml.make_valid_xml_value(thread_name)\n append('<thread name="%s" id="%s" />' % (quote(name), thread_id))\n\n append("</xml>")\n return NetCommand(CMD_RETURN, seq, "".join(cmd_text))\n except:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):\n """\n Returns thread stack as XML.\n\n :param must_be_suspended: If True and the thread is not suspended, returns None.\n """\n try:\n # If frame is None, the return is an empty frame list.\n cmd_text = ['<xml><thread id="%s">' % (thread_id,)]\n\n if topmost_frame is not None:\n try:\n # : :type suspended_frames_manager: SuspendedFramesManager\n suspended_frames_manager = py_db.suspended_frames_manager\n frames_list = suspended_frames_manager.get_frames_list(thread_id)\n if frames_list is None:\n # Could not find stack of suspended frame...\n if must_be_suspended:\n return None\n else:\n frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)\n\n cmd_text.append(self.make_thread_stack_str(py_db, frames_list))\n finally:\n topmost_frame = None\n cmd_text.append("</thread></xml>")\n return NetCommand(CMD_GET_THREAD_STACK, seq, "".join(cmd_text))\n except:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_variable_changed_message(self, seq, payload):\n # notify debugger that value was changed successfully\n return NetCommand(CMD_RETURN, seq, payload)\n\n def make_warning_message(self, msg):\n return self.make_io_message(msg, 2)\n\n def make_console_message(self, msg):\n return self.make_io_message(msg, 2)\n\n def make_io_message(self, msg, ctx):\n """\n @param msg: the message to pass to the debug server\n @param ctx: 1 for stdio 2 for stderr\n """\n 
try:\n msg = pydevd_constants.as_str(msg)\n\n if len(msg) > MAX_IO_MSG_SIZE:\n msg = msg[0:MAX_IO_MSG_SIZE]\n msg += "..."\n\n msg = pydevd_xml.make_valid_xml_value(quote(msg, "/>_= "))\n return NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (msg, ctx))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_version_message(self, seq):\n try:\n return NetCommand(CMD_VERSION, seq, VERSION_STRING)\n except:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_thread_killed_message(self, tid):\n self._additional_thread_id_to_thread_name.pop(tid, None)\n try:\n return NetCommand(CMD_THREAD_KILL, 0, str(tid))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def _iter_visible_frames_info(self, py_db, frames_list, flatten_chained=False):\n assert frames_list.__class__ == FramesList\n is_chained = False\n while True:\n for frame in frames_list:\n show_as_current_frame = frame is frames_list.current_frame\n if frame.f_code is None:\n pydev_log.info("Frame without f_code: %s", frame)\n continue # IronPython sometimes does not have it!\n\n method_name = frame.f_code.co_name # method name (if in method) or ? 
if global\n if method_name is None:\n pydev_log.info("Frame without co_name: %s", frame)\n continue # IronPython sometimes does not have it!\n\n if is_chained:\n method_name = "[Chained Exc: %s] %s" % (frames_list.exc_desc, method_name)\n\n abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)\n if py_db.get_file_type(frame, abs_path_real_path_and_base) == py_db.PYDEV_FILE:\n # Skip pydevd files.\n frame = frame.f_back\n continue\n\n frame_id = id(frame)\n lineno = frames_list.frame_id_to_lineno.get(frame_id, frame.f_lineno)\n line_col_info = frames_list.frame_id_to_line_col_info.get(frame_id)\n\n filename_in_utf8, lineno, changed = py_db.source_mapping.map_to_client(abs_path_real_path_and_base[0], lineno)\n new_filename_in_utf8, applied_mapping = pydevd_file_utils.map_file_to_client(filename_in_utf8)\n applied_mapping = applied_mapping or changed\n\n yield (\n frame_id,\n frame,\n method_name,\n abs_path_real_path_and_base[0],\n new_filename_in_utf8,\n lineno,\n applied_mapping,\n show_as_current_frame,\n line_col_info,\n )\n\n if not flatten_chained:\n break\n\n frames_list = frames_list.chained_frames_list\n if frames_list is None or len(frames_list) == 0:\n break\n is_chained = True\n\n def make_thread_stack_str(self, py_db, frames_list):\n assert frames_list.__class__ == FramesList\n make_valid_xml_value = pydevd_xml.make_valid_xml_value\n cmd_text_list = []\n append = cmd_text_list.append\n\n try:\n for (\n frame_id,\n frame,\n method_name,\n _original_filename,\n filename_in_utf8,\n lineno,\n _applied_mapping,\n _show_as_current_frame,\n line_col_info,\n ) in self._iter_visible_frames_info(py_db, frames_list, flatten_chained=True):\n # print("file is ", filename_in_utf8)\n # print("line is ", lineno)\n\n # Note: variables are all gotten 'on-demand'.\n append('<frame id="%s" name="%s" ' % (frame_id, make_valid_xml_value(method_name)))\n append('file="%s" line="%s">' % (quote(make_valid_xml_value(filename_in_utf8), "/>_= \t"), 
lineno))\n append("</frame>")\n except:\n pydev_log.exception()\n\n return "".join(cmd_text_list)\n\n def make_thread_suspend_str(\n self,\n py_db,\n thread_id,\n frames_list,\n stop_reason=None,\n message=None,\n trace_suspend_type="trace",\n ):\n """\n :return tuple(str,str):\n Returns tuple(thread_suspended_str, thread_stack_str).\n\n i.e.:\n (\n '''\n <xml>\n <thread id="id" stop_reason="reason">\n <frame id="id" name="functionName " file="file" line="line">\n </frame>\n </thread>\n </xml>\n '''\n ,\n '''\n <frame id="id" name="functionName " file="file" line="line">\n </frame>\n '''\n )\n """\n assert frames_list.__class__ == FramesList\n make_valid_xml_value = pydevd_xml.make_valid_xml_value\n cmd_text_list = []\n append = cmd_text_list.append\n\n cmd_text_list.append("<xml>")\n if message:\n message = make_valid_xml_value(message)\n\n append('<thread id="%s"' % (thread_id,))\n if stop_reason is not None:\n append(' stop_reason="%s"' % (stop_reason,))\n if message is not None:\n append(' message="%s"' % (message,))\n if trace_suspend_type is not None:\n append(' suspend_type="%s"' % (trace_suspend_type,))\n append(">")\n thread_stack_str = self.make_thread_stack_str(py_db, frames_list)\n append(thread_stack_str)\n append("</thread></xml>")\n\n return "".join(cmd_text_list), thread_stack_str\n\n def make_thread_suspend_message(self, py_db, thread_id, frames_list, stop_reason, message, trace_suspend_type, thread, additional_info):\n try:\n thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(\n py_db, thread_id, frames_list, stop_reason, message, trace_suspend_type\n )\n cmd = NetCommand(CMD_THREAD_SUSPEND, 0, thread_suspend_str)\n cmd.thread_stack_str = thread_stack_str\n cmd.thread_suspend_str = thread_suspend_str\n return cmd\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_thread_suspend_single_notification(self, py_db, thread_id, thread, stop_reason):\n try:\n return 
NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, json.dumps({"thread_id": thread_id, "stop_reason": stop_reason}))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_thread_resume_single_notification(self, thread_id):\n try:\n return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, json.dumps({"thread_id": thread_id}))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_thread_run_message(self, py_db, thread_id, reason):\n try:\n return NetCommand(CMD_THREAD_RUN, 0, "%s\t%s" % (thread_id, reason))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_get_variable_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_VARIABLE, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_array_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_ARRAY, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_description_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_DESCRIPTION, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_frame_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_FRAME, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_evaluate_expression_message(self, seq, payload):\n try:\n return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_completions_message(self, seq, completions, qualifier, start):\n try:\n payload = completions_to_xml(completions)\n return NetCommand(CMD_GET_COMPLETIONS, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def 
make_get_file_contents(self, seq, payload):\n try:\n return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_reloaded_code_message(self, seq, reloaded_ok):\n try:\n return NetCommand(CMD_RELOAD_CODE, seq, '<xml><reloaded ok="%s"></reloaded></xml>' % reloaded_ok)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_send_breakpoint_exception_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def _make_send_curr_exception_trace_str(self, py_db, thread_id, exc_type, exc_desc, trace_obj):\n frames_list = pydevd_frame_utils.create_frames_list_from_traceback(trace_obj, None, exc_type, exc_desc)\n\n exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace("\t", " ") or "exception: type unknown"\n exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace("\t", " ") or "exception: no description"\n\n thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(\n py_db, thread_id, frames_list, CMD_SEND_CURR_EXCEPTION_TRACE, ""\n )\n return exc_type, exc_desc, thread_suspend_str, thread_stack_str\n\n def make_send_curr_exception_trace_message(self, py_db, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):\n try:\n exc_type, exc_desc, thread_suspend_str, _thread_stack_str = self._make_send_curr_exception_trace_str(\n py_db, thread_id, exc_type, exc_desc, trace_obj\n )\n payload = str(curr_frame_id) + "\t" + exc_type + "\t" + exc_desc + "\t" + thread_suspend_str\n return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_exception_details_message(self, py_db, seq, thread_id, topmost_frame):\n """Returns exception details as XML"""\n try:\n # If 
the debugger is not suspended, just return the thread and its id.\n cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]\n\n if topmost_frame is not None:\n try:\n frame = topmost_frame\n topmost_frame = None\n while frame is not None:\n if frame.f_code.co_name == "do_wait_suspend" and frame.f_code.co_filename.endswith("pydevd.py"):\n arg = frame.f_locals.get("arg", None)\n if arg is not None:\n exc_type, exc_desc, _thread_suspend_str, thread_stack_str = self._make_send_curr_exception_trace_str(\n py_db, thread_id, *arg\n )\n cmd_text.append('exc_type="%s" ' % (exc_type,))\n cmd_text.append('exc_desc="%s" ' % (exc_desc,))\n cmd_text.append(">")\n cmd_text.append(thread_stack_str)\n break\n frame = frame.f_back\n else:\n cmd_text.append(">")\n finally:\n frame = None\n cmd_text.append("</thread></xml>")\n return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, "".join(cmd_text))\n except:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):\n try:\n return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_send_console_message(self, seq, payload):\n try:\n return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_custom_operation_message(self, seq, payload):\n try:\n return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_load_source_message(self, seq, source):\n return NetCommand(CMD_LOAD_SOURCE, seq, source)\n\n def make_load_source_from_frame_id_message(self, seq, source):\n return NetCommand(CMD_LOAD_SOURCE_FROM_FRAME_ID, seq, source)\n\n def make_show_console_message(self, py_db, thread_id, frame):\n try:\n frames_list = 
pydevd_frame_utils.create_frames_list_from_frame(frame)\n thread_suspended_str, _thread_stack_str = self.make_thread_suspend_str(py_db, thread_id, frames_list, CMD_SHOW_CONSOLE, "")\n return NetCommand(CMD_SHOW_CONSOLE, 0, thread_suspended_str)\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_input_requested_message(self, started):\n try:\n return NetCommand(CMD_INPUT_REQUESTED, 0, str(started))\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):\n try:\n message = str(is_success) + "\t" + exception_msg\n return NetCommand(CMD_SET_NEXT_STATEMENT, int(seq), message)\n except:\n return self.make_error_message(0, get_exception_traceback_str())\n\n def make_load_full_value_message(self, seq, payload):\n try:\n return NetCommand(CMD_LOAD_FULL_VALUE, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_get_next_statement_targets_message(self, seq, payload):\n try:\n return NetCommand(CMD_GET_NEXT_STATEMENT_TARGETS, seq, payload)\n except Exception:\n return self.make_error_message(seq, get_exception_traceback_str())\n\n def make_skipped_step_in_because_of_filters(self, py_db, frame):\n return NULL_NET_COMMAND # Not a part of the xml protocol\n\n def make_evaluation_timeout_msg(self, py_db, expression, thread):\n msg = """pydevd: Evaluating: %s did not finish after %.2f seconds.\nThis may mean a number of things:\n- This evaluation is really slow and this is expected.\n In this case it's possible to silence this error by raising the timeout, setting the\n PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.\n\n- The evaluation may need other threads running while it's running:\n In this case, you may need to manually let other paused threads continue.\n\n Alternatively, it's also possible to skip breaking on a particular thread by setting a\n 
`pydev_do_not_trace = True` attribute in the related threading.Thread instance\n (if some thread should always be running and no breakpoints are expected to be hit in it).\n\n- The evaluation is deadlocked:\n In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT\n environment variable to true so that a thread dump is shown along with this message and\n optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger\n tries to interrupt the evaluation (if possible) when this happens.\n""" % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)\n\n if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:\n stream = StringIO()\n pydevd_utils.dump_threads(stream, show_pydevd_threads=False)\n msg += "\n\n%s\n" % stream.getvalue()\n return self.make_warning_message(msg)\n\n def make_exit_command(self, py_db):\n return NULL_EXIT_COMMAND\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_net_command_factory_xml.py
|
pydevd_net_command_factory_xml.py
|
Python
| 24,342 | 0.95 | 0.207885 | 0.025105 |
vue-tools
| 575 |
2025-01-27T07:03:36.845954
|
MIT
| false |
3d7cb19f8e40d4d215103b4a729e397d
|
import types\n\nfrom _pydev_bundle import pydev_log\nfrom typing import Tuple, Literal\n\ntry:\n from pydevd_plugins import django_debug\nexcept:\n django_debug = None\n pydev_log.debug("Unable to load django_debug plugin")\n\ntry:\n from pydevd_plugins import jinja2_debug\nexcept:\n jinja2_debug = None\n pydev_log.debug("Unable to load jinja2_debug plugin")\n\n\ndef load_plugins():\n plugins = []\n if django_debug is not None:\n plugins.append(django_debug)\n\n if jinja2_debug is not None:\n plugins.append(jinja2_debug)\n return plugins\n\n\ndef bind_func_to_method(func, obj, method_name):\n bound_method = types.MethodType(func, obj)\n\n setattr(obj, method_name, bound_method)\n return bound_method\n\n\nclass PluginManager(object):\n EMPTY_SENTINEL = object()\n\n def __init__(self, main_debugger):\n self.plugins = load_plugins()\n\n # When some breakpoint is added for a given plugin it becomes active.\n self.active_plugins = []\n\n self.main_debugger = main_debugger\n\n def add_breakpoint(self, func_name, *args, **kwargs):\n # add breakpoint for plugin\n for plugin in self.plugins:\n if hasattr(plugin, func_name):\n func = getattr(plugin, func_name)\n result = func(*args, **kwargs)\n if result:\n self.activate(plugin)\n return result\n return None\n\n def activate(self, plugin):\n if plugin not in self.active_plugins:\n self.active_plugins.append(plugin)\n\n # These are not a part of the API, rather, `add_breakpoint` should be used with `add_line_breakpoint` or `add_exception_breakpoint`\n # which will call it for all plugins and then if it's valid it'll be activated.\n #\n # def add_line_breakpoint(self, py_db, type, canonical_normalized_filename, breakpoint_id, line, condition, expression, func_name, hit_condition=None, is_logpoint=False, add_breakpoint_result=None, on_changed_breakpoint_state=None):\n # def add_exception_breakpoint(plugin, py_db, type, exception):\n\n def after_breakpoints_consolidated(self, py_db, canonical_normalized_filename, 
id_to_pybreakpoint, file_to_line_to_breakpoints):\n for plugin in self.active_plugins:\n plugin.after_breakpoints_consolidated(py_db, canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints)\n\n def remove_exception_breakpoint(self, py_db, exception_type, exception):\n """\n :param exception_type: 'django', 'jinja2' (can be extended)\n """\n for plugin in self.active_plugins:\n ret = plugin.remove_exception_breakpoint(py_db, exception_type, exception)\n if ret:\n return ret\n\n return None\n\n def remove_all_exception_breakpoints(self, py_db):\n for plugin in self.active_plugins:\n plugin.remove_all_exception_breakpoints(py_db)\n\n def get_breakpoints(self, py_db, breakpoint_type):\n """\n :param breakpoint_type: 'django-line', 'jinja2-line'\n """\n for plugin in self.active_plugins:\n ret = plugin.get_breakpoints(py_db, breakpoint_type)\n if ret:\n return ret\n\n def can_skip(self, py_db, frame):\n for plugin in self.active_plugins:\n if not plugin.can_skip(py_db, frame):\n return False\n return True\n\n def required_events_breakpoint(self) -> Tuple[Literal["line", "call"], ...]:\n ret = ()\n for plugin in self.active_plugins:\n new = plugin.required_events_breakpoint()\n if new:\n ret += new\n\n return ret\n\n def required_events_stepping(self) -> Tuple[Literal["line", "call", "return"], ...]:\n ret = ()\n for plugin in self.active_plugins:\n new = plugin.required_events_stepping()\n if new:\n ret += new\n\n return ret\n\n def is_tracked_frame(self, frame) -> bool:\n for plugin in self.active_plugins:\n if plugin.is_tracked_frame(frame):\n return True\n return False\n\n def has_exception_breaks(self, py_db) -> bool:\n for plugin in self.active_plugins:\n if plugin.has_exception_breaks(py_db):\n return True\n return False\n\n def has_line_breaks(self, py_db) -> bool:\n for plugin in self.active_plugins:\n if plugin.has_line_breaks(py_db):\n return True\n return False\n\n def cmd_step_into(self, py_db, frame, event, info, thread, stop_info, 
stop: bool):\n """\n :param stop_info: in/out information. If it should stop then it'll be\n filled by the plugin.\n :param stop: whether the stop has already been flagged for this frame.\n :returns:\n tuple(stop, plugin_stop)\n """\n plugin_stop = False\n for plugin in self.active_plugins:\n stop, plugin_stop = plugin.cmd_step_into(py_db, frame, event, info, thread, stop_info, stop)\n if plugin_stop:\n return stop, plugin_stop\n return stop, plugin_stop\n\n def cmd_step_over(self, py_db, frame, event, info, thread, stop_info, stop):\n plugin_stop = False\n for plugin in self.active_plugins:\n stop, plugin_stop = plugin.cmd_step_over(py_db, frame, event, info, thread, stop_info, stop)\n if plugin_stop:\n return stop, plugin_stop\n return stop, plugin_stop\n\n def stop(self, py_db, frame, event, thread, stop_info, arg, step_cmd):\n """\n The way this works is that the `cmd_step_into` or `cmd_step_over`\n is called which then fills the `stop_info` and then this method\n is called to do the actual stop.\n """\n for plugin in self.active_plugins:\n stopped = plugin.stop(py_db, frame, event, thread, stop_info, arg, step_cmd)\n if stopped:\n return stopped\n return False\n\n def get_breakpoint(self, py_db, frame, event, info):\n for plugin in self.active_plugins:\n ret = plugin.get_breakpoint(py_db, frame, event, info)\n if ret:\n return ret\n return None\n\n def suspend(self, py_db, thread, frame, bp_type):\n """\n :param bp_type: 'django' or 'jinja2'\n\n :return:\n The frame for the suspend or None if it should not be suspended.\n """\n for plugin in self.active_plugins:\n ret = plugin.suspend(py_db, thread, frame, bp_type)\n if ret is not None:\n return ret\n\n return None\n\n def exception_break(self, py_db, frame, thread, arg, is_unwind=False):\n for plugin in self.active_plugins:\n ret = plugin.exception_break(py_db, frame, thread, arg, is_unwind)\n if ret is not None:\n return ret\n\n return None\n\n def change_variable(self, frame, attr, expression, 
scope=None):\n for plugin in self.active_plugins:\n ret = plugin.change_variable(frame, attr, expression, self.EMPTY_SENTINEL, scope)\n if ret is not self.EMPTY_SENTINEL:\n return ret\n\n return self.EMPTY_SENTINEL\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_plugin_utils.py
|
pydevd_plugin_utils.py
|
Python
| 7,427 | 0.95 | 0.346154 | 0.041916 |
python-kit
| 568 |
2024-11-03T17:28:24.762698
|
GPL-3.0
| false |
648669c243e3b5a7b6b7ad0005ad5898
|
import itertools\nimport json\nimport linecache\nimport os\nimport platform\nimport sys\nfrom functools import partial\n\nimport pydevd_file_utils\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_bundle._debug_adapter import pydevd_base_schema, pydevd_schema\nfrom _pydevd_bundle._debug_adapter.pydevd_schema import (\n CompletionsResponseBody,\n EvaluateResponseBody,\n ExceptionOptions,\n GotoTargetsResponseBody,\n ModulesResponseBody,\n ProcessEventBody,\n ProcessEvent,\n Scope,\n ScopesResponseBody,\n SetExpressionResponseBody,\n SetVariableResponseBody,\n SourceBreakpoint,\n SourceResponseBody,\n VariablesResponseBody,\n SetBreakpointsResponseBody,\n Response,\n Capabilities,\n PydevdAuthorizeRequest,\n Request,\n StepInTargetsResponseBody,\n SetFunctionBreakpointsResponseBody,\n BreakpointEvent,\n BreakpointEventBody,\n InitializedEvent,\n)\nfrom _pydevd_bundle.pydevd_api import PyDevdAPI\nfrom _pydevd_bundle.pydevd_breakpoints import get_exception_class, FunctionBreakpoint\nfrom _pydevd_bundle.pydevd_comm_constants import (\n CMD_PROCESS_EVENT,\n CMD_RETURN,\n CMD_SET_NEXT_STATEMENT,\n CMD_STEP_INTO,\n CMD_STEP_INTO_MY_CODE,\n CMD_STEP_OVER,\n CMD_STEP_OVER_MY_CODE,\n file_system_encoding,\n CMD_STEP_RETURN_MY_CODE,\n CMD_STEP_RETURN,\n)\nfrom _pydevd_bundle.pydevd_filtering import ExcludeFilter\nfrom _pydevd_bundle.pydevd_json_debug_options import _extract_debug_options, DebugOptions\nfrom _pydevd_bundle.pydevd_net_command import NetCommand\nfrom _pydevd_bundle.pydevd_utils import convert_dap_log_message_to_expression, ScopeRequest\nfrom _pydevd_bundle.pydevd_constants import PY_IMPL_NAME, DebugInfoHolder, PY_VERSION_STR, PY_IMPL_VERSION_STR, IS_64BIT_PROCESS\nfrom _pydevd_bundle.pydevd_trace_dispatch import USING_CYTHON\nfrom _pydevd_frame_eval.pydevd_frame_eval_main import USING_FRAME_EVAL\nfrom _pydevd_bundle.pydevd_comm import internal_get_step_in_targets_json\nfrom _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info\nfrom 
_pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id\n\n\ndef _convert_rules_to_exclude_filters(rules, on_error):\n exclude_filters = []\n if not isinstance(rules, list):\n on_error('Invalid "rules" (expected list of dicts). Found: %s' % (rules,))\n\n else:\n directory_exclude_filters = []\n module_exclude_filters = []\n glob_exclude_filters = []\n\n for rule in rules:\n if not isinstance(rule, dict):\n on_error('Invalid "rules" (expected list of dicts). Found: %s' % (rules,))\n continue\n\n include = rule.get("include")\n if include is None:\n on_error('Invalid "rule" (expected dict with "include"). Found: %s' % (rule,))\n continue\n\n path = rule.get("path")\n module = rule.get("module")\n if path is None and module is None:\n on_error('Invalid "rule" (expected dict with "path" or "module"). Found: %s' % (rule,))\n continue\n\n if path is not None:\n glob_pattern = path\n if "*" not in path and "?" not in path:\n if os.path.isdir(glob_pattern):\n # If a directory was specified, add a '/**'\n # to be consistent with the glob pattern required\n # by pydevd.\n if not glob_pattern.endswith("/") and not glob_pattern.endswith("\\"):\n glob_pattern += "/"\n glob_pattern += "**"\n directory_exclude_filters.append(ExcludeFilter(glob_pattern, not include, True))\n else:\n glob_exclude_filters.append(ExcludeFilter(glob_pattern, not include, True))\n\n elif module is not None:\n module_exclude_filters.append(ExcludeFilter(module, not include, False))\n\n else:\n on_error("Internal error: expected path or module to be specified.")\n\n # Note that we have to sort the directory/module exclude filters so that the biggest\n # paths match first.\n # i.e.: if we have:\n # /sub1/sub2/sub3\n # a rule with /sub1/sub2 would match before a rule only with /sub1.\n directory_exclude_filters = sorted(directory_exclude_filters, key=lambda exclude_filter: -len(exclude_filter.name))\n module_exclude_filters = sorted(module_exclude_filters, key=lambda exclude_filter: 
-len(exclude_filter.name))\n exclude_filters = directory_exclude_filters + glob_exclude_filters + module_exclude_filters\n\n return exclude_filters\n\n\nclass IDMap(object):\n def __init__(self):\n self._value_to_key = {}\n self._key_to_value = {}\n self._next_id = partial(next, itertools.count(0))\n\n def obtain_value(self, key):\n return self._key_to_value[key]\n\n def obtain_key(self, value):\n try:\n key = self._value_to_key[value]\n except KeyError:\n key = self._next_id()\n self._key_to_value[key] = value\n self._value_to_key[value] = key\n return key\n\n\nclass PyDevJsonCommandProcessor(object):\n def __init__(self, from_json):\n self.from_json = from_json\n self.api = PyDevdAPI()\n self._options = DebugOptions()\n self._next_breakpoint_id = partial(next, itertools.count(0))\n self._goto_targets_map = IDMap()\n self._launch_or_attach_request_done = False\n\n def process_net_command_json(self, py_db, json_contents, send_response=True):\n """\n Processes a debug adapter protocol json command.\n """\n\n DEBUG = False\n\n try:\n if isinstance(json_contents, bytes):\n json_contents = json_contents.decode("utf-8")\n\n request = self.from_json(json_contents, update_ids_from_dap=True)\n except Exception as e:\n try:\n loaded_json = json.loads(json_contents)\n request = Request(loaded_json.get("command", "<unknown>"), loaded_json["seq"])\n except:\n # There's not much we can do in this case...\n pydev_log.exception("Error loading json: %s", json_contents)\n return\n\n error_msg = str(e)\n if error_msg.startswith("'") and error_msg.endswith("'"):\n error_msg = error_msg[1:-1]\n\n # This means a failure processing the request (but we were able to load the seq,\n # so, answer with a failure response).\n def on_request(py_db, request):\n error_response = {\n "type": "response",\n "request_seq": request.seq,\n "success": False,\n "command": request.command,\n "message": error_msg,\n }\n return NetCommand(CMD_RETURN, 0, error_response, is_json=True)\n\n else:\n if 
DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:\n pydev_log.info(\n "Process %s: %s\n"\n % (\n request.__class__.__name__,\n json.dumps(request.to_dict(update_ids_to_dap=True), indent=4, sort_keys=True),\n )\n )\n\n assert request.type == "request"\n method_name = "on_%s_request" % (request.command.lower(),)\n on_request = getattr(self, method_name, None)\n if on_request is None:\n print("Unhandled: %s not available in PyDevJsonCommandProcessor.\n" % (method_name,))\n return\n\n if DEBUG:\n print("Handled in pydevd: %s (in PyDevJsonCommandProcessor).\n" % (method_name,))\n\n with py_db._main_lock:\n if request.__class__ == PydevdAuthorizeRequest:\n authorize_request = request # : :type authorize_request: PydevdAuthorizeRequest\n access_token = authorize_request.arguments.debugServerAccessToken\n py_db.authentication.login(access_token)\n\n if not py_db.authentication.is_authenticated():\n response = Response(request.seq, success=False, command=request.command, message="Client not authenticated.", body={})\n cmd = NetCommand(CMD_RETURN, 0, response, is_json=True)\n py_db.writer.add_command(cmd)\n return\n\n cmd = on_request(py_db, request)\n if cmd is not None and send_response:\n py_db.writer.add_command(cmd)\n\n def on_pydevdauthorize_request(self, py_db, request):\n client_access_token = py_db.authentication.client_access_token\n body = {"clientAccessToken": None}\n if client_access_token:\n body["clientAccessToken"] = client_access_token\n\n response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_initialize_request(self, py_db, request):\n body = Capabilities(\n # Supported.\n supportsConfigurationDoneRequest=True,\n supportsConditionalBreakpoints=True,\n supportsHitConditionalBreakpoints=True,\n supportsEvaluateForHovers=True,\n supportsSetVariable=True,\n supportsGotoTargetsRequest=True,\n supportsCompletionsRequest=True,\n supportsModulesRequest=True,\n 
supportsExceptionOptions=True,\n supportsValueFormattingOptions=True,\n supportsExceptionInfoRequest=True,\n supportTerminateDebuggee=True,\n supportsDelayedStackTraceLoading=True,\n supportsLogPoints=True,\n supportsSetExpression=True,\n supportsTerminateRequest=True,\n supportsClipboardContext=True,\n supportsFunctionBreakpoints=True,\n exceptionBreakpointFilters=[\n {"filter": "raised", "label": "Raised Exceptions", "default": False},\n {"filter": "uncaught", "label": "Uncaught Exceptions", "default": True},\n {"filter": "userUnhandled", "label": "User Uncaught Exceptions", "default": False},\n ],\n # Not supported.\n supportsStepBack=False,\n supportsRestartFrame=False,\n supportsStepInTargetsRequest=True,\n supportsRestartRequest=False,\n supportsLoadedSourcesRequest=False,\n supportsTerminateThreadsRequest=False,\n supportsDataBreakpoints=False,\n supportsReadMemoryRequest=False,\n supportsDisassembleRequest=False,\n additionalModuleColumns=[],\n completionTriggerCharacters=[],\n supportedChecksumAlgorithms=[],\n ).to_dict()\n\n # Non-standard capabilities/info below.\n body["supportsDebuggerProperties"] = True\n\n body["pydevd"] = pydevd_info = {}\n pydevd_info["processId"] = os.getpid()\n self.api.notify_initialize(py_db)\n response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_configurationdone_request(self, py_db, request):\n """\n :param ConfigurationDoneRequest request:\n """\n if not self._launch_or_attach_request_done:\n pydev_log.critical("Missing launch request or attach request before configuration done request.")\n\n self.api.run(py_db)\n self.api.notify_configuration_done(py_db)\n\n configuration_done_response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, configuration_done_response, is_json=True)\n\n def on_threads_request(self, py_db, request):\n """\n :param ThreadsRequest request:\n """\n return 
self.api.list_threads(py_db, request.seq)\n\n def on_terminate_request(self, py_db, request):\n """\n :param TerminateRequest request:\n """\n self._request_terminate_process(py_db)\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def _request_terminate_process(self, py_db):\n self.api.request_terminate_process(py_db)\n\n def on_completions_request(self, py_db, request):\n """\n :param CompletionsRequest request:\n """\n arguments = request.arguments # : :type arguments: CompletionsArguments\n seq = request.seq\n text = arguments.text\n frame_id = arguments.frameId\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(frame_id)\n\n if thread_id is None:\n body = CompletionsResponseBody([])\n variables_response = pydevd_base_schema.build_response(\n request, kwargs={"body": body, "success": False, "message": "Thread to get completions seems to have resumed already."}\n )\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n # Note: line and column are 1-based (convert to 0-based for pydevd).\n column = arguments.column - 1\n\n if arguments.line is None:\n # line is optional\n line = -1\n else:\n line = arguments.line - 1\n\n self.api.request_completions(py_db, seq, thread_id, frame_id, text, line=line, column=column)\n\n def _resolve_remote_root(self, local_root, remote_root):\n if remote_root == ".":\n cwd = os.getcwd()\n append_pathsep = local_root.endswith("\\") or local_root.endswith("/")\n return cwd + (os.path.sep if append_pathsep else "")\n return remote_root\n\n def _set_debug_options(self, py_db, args, start_reason):\n rules = args.get("rules")\n stepping_resumes_all_threads = args.get("steppingResumesAllThreads", True)\n self.api.set_stepping_resumes_all_threads(py_db, stepping_resumes_all_threads)\n\n terminate_child_processes = args.get("terminateChildProcesses", True)\n self.api.set_terminate_child_processes(py_db, 
terminate_child_processes)\n\n terminate_keyboard_interrupt = args.get("onTerminate", "kill") == "KeyboardInterrupt"\n self.api.set_terminate_keyboard_interrupt(py_db, terminate_keyboard_interrupt)\n\n variable_presentation = args.get("variablePresentation", None)\n if isinstance(variable_presentation, dict):\n\n def get_variable_presentation(setting, default):\n value = variable_presentation.get(setting, default)\n if value not in ("group", "inline", "hide"):\n pydev_log.info(\n 'The value set for "%s" (%s) in the variablePresentation is not valid. Valid values are: "group", "inline", "hide"'\n % (\n setting,\n value,\n )\n )\n value = default\n\n return value\n\n default = get_variable_presentation("all", "group")\n\n special_presentation = get_variable_presentation("special", default)\n function_presentation = get_variable_presentation("function", default)\n class_presentation = get_variable_presentation("class", default)\n protected_presentation = get_variable_presentation("protected", default)\n\n self.api.set_variable_presentation(\n py_db,\n self.api.VariablePresentation(special_presentation, function_presentation, class_presentation, protected_presentation),\n )\n\n exclude_filters = []\n\n if rules is not None:\n exclude_filters = _convert_rules_to_exclude_filters(rules, lambda msg: self.api.send_error_message(py_db, msg))\n\n self.api.set_exclude_filters(py_db, exclude_filters)\n\n debug_options = _extract_debug_options(\n args.get("options"),\n args.get("debugOptions"),\n )\n self._options.update_fom_debug_options(debug_options)\n self._options.update_from_args(args)\n\n self.api.set_use_libraries_filter(py_db, self._options.just_my_code)\n\n if self._options.client_os:\n self.api.set_ide_os(self._options.client_os)\n\n path_mappings = []\n for pathMapping in args.get("pathMappings", []):\n localRoot = pathMapping.get("localRoot", "")\n remoteRoot = pathMapping.get("remoteRoot", "")\n remoteRoot = self._resolve_remote_root(localRoot, remoteRoot)\n if 
(localRoot != "") and (remoteRoot != ""):\n path_mappings.append((localRoot, remoteRoot))\n\n if bool(path_mappings):\n pydevd_file_utils.setup_client_server_paths(path_mappings)\n\n resolve_symlinks = args.get("resolveSymlinks", None)\n if resolve_symlinks is not None:\n pydevd_file_utils.set_resolve_symlinks(resolve_symlinks)\n\n redirecting = args.get("isOutputRedirected")\n if self._options.redirect_output:\n py_db.enable_output_redirection(True, True)\n redirecting = True\n else:\n py_db.enable_output_redirection(False, False)\n\n py_db.is_output_redirected = redirecting\n\n self.api.set_show_return_values(py_db, self._options.show_return_value)\n\n if not self._options.break_system_exit_zero:\n ignore_system_exit_codes = [0, None]\n if self._options.django_debug or self._options.flask_debug:\n ignore_system_exit_codes += [3]\n\n self.api.set_ignore_system_exit_codes(py_db, ignore_system_exit_codes)\n\n auto_reload = args.get("autoReload", {})\n if not isinstance(auto_reload, dict):\n pydev_log.info("Expected autoReload to be a dict. 
Received: %s" % (auto_reload,))\n auto_reload = {}\n\n enable_auto_reload = auto_reload.get("enable", False)\n watch_dirs = auto_reload.get("watchDirectories")\n if not watch_dirs:\n watch_dirs = []\n # Note: by default this is no longer done because on some cases there are entries in the PYTHONPATH\n # such as the home directory or /python/x64, where the site packages are in /python/x64/libs, so,\n # we only watch the current working directory as well as executed script.\n # check = getattr(sys, 'path', [])[:]\n # # By default only watch directories that are in the project roots /\n # # program dir (if available), sys.argv[0], as well as the current dir (we don't want to\n # # listen to the whole site-packages by default as it can be huge).\n # watch_dirs = [pydevd_file_utils.absolute_path(w) for w in check]\n # watch_dirs = [w for w in watch_dirs if py_db.in_project_roots_filename_uncached(w) and os.path.isdir(w)]\n\n program = args.get("program")\n if program:\n if os.path.isdir(program):\n watch_dirs.append(program)\n else:\n watch_dirs.append(os.path.dirname(program))\n watch_dirs.append(os.path.abspath("."))\n\n argv = getattr(sys, "argv", [])\n if argv:\n f = argv[0]\n if f: # argv[0] could be None (https://github.com/microsoft/debugpy/issues/987)\n if os.path.isdir(f):\n watch_dirs.append(f)\n else:\n watch_dirs.append(os.path.dirname(f))\n\n if not isinstance(watch_dirs, (list, set, tuple)):\n watch_dirs = (watch_dirs,)\n new_watch_dirs = set()\n for w in watch_dirs:\n try:\n new_watch_dirs.add(pydevd_file_utils.get_path_with_real_case(pydevd_file_utils.absolute_path(w)))\n except Exception:\n pydev_log.exception("Error adding watch dir: %s", w)\n watch_dirs = new_watch_dirs\n\n poll_target_time = auto_reload.get("pollingInterval", 1)\n exclude_patterns = auto_reload.get(\n "exclude", ("**/.git/**", "**/__pycache__/**", "**/node_modules/**", "**/.metadata/**", "**/site-packages/**")\n )\n include_patterns = auto_reload.get("include", ("**/*.py", 
"**/*.pyw"))\n self.api.setup_auto_reload_watcher(py_db, enable_auto_reload, watch_dirs, poll_target_time, exclude_patterns, include_patterns)\n\n if self._options.stop_on_entry and start_reason == "launch":\n self.api.stop_on_entry()\n\n self.api.set_gui_event_loop(py_db, self._options.gui_event_loop)\n\n def _send_process_event(self, py_db, start_method):\n argv = getattr(sys, "argv", [])\n if len(argv) > 0:\n name = argv[0]\n else:\n name = ""\n\n if isinstance(name, bytes):\n name = name.decode(file_system_encoding, "replace")\n name = name.encode("utf-8")\n\n body = ProcessEventBody(\n name=name,\n systemProcessId=os.getpid(),\n isLocalProcess=True,\n startMethod=start_method,\n )\n event = ProcessEvent(body)\n py_db.writer.add_command(NetCommand(CMD_PROCESS_EVENT, 0, event, is_json=True))\n\n def _handle_launch_or_attach_request(self, py_db, request, start_reason):\n self._send_process_event(py_db, start_reason)\n self._launch_or_attach_request_done = True\n self.api.set_enable_thread_notifications(py_db, True)\n self._set_debug_options(py_db, request.arguments.kwargs, start_reason=start_reason)\n response = pydevd_base_schema.build_response(request)\n\n initialized_event = InitializedEvent()\n py_db.writer.add_command(NetCommand(CMD_RETURN, 0, initialized_event, is_json=True))\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_launch_request(self, py_db, request):\n """\n :param LaunchRequest request:\n """\n return self._handle_launch_or_attach_request(py_db, request, start_reason="launch")\n\n def on_attach_request(self, py_db, request):\n """\n :param AttachRequest request:\n """\n return self._handle_launch_or_attach_request(py_db, request, start_reason="attach")\n\n def on_pause_request(self, py_db, request):\n """\n :param PauseRequest request:\n """\n arguments = request.arguments # : :type arguments: PauseArguments\n thread_id = arguments.threadId\n\n self.api.request_suspend_thread(py_db, thread_id=thread_id)\n\n response = 
pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_continue_request(self, py_db, request):\n """\n :param ContinueRequest request:\n """\n arguments = request.arguments # : :type arguments: ContinueArguments\n thread_id = arguments.threadId\n\n def on_resumed():\n body = {"allThreadsContinued": thread_id == "*"}\n response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n cmd = NetCommand(CMD_RETURN, 0, response, is_json=True)\n py_db.writer.add_command(cmd)\n\n if py_db.multi_threads_single_notification:\n # Only send resumed notification when it has actually resumed!\n # (otherwise the user could send a continue, receive the notification and then\n # request a new pause which would be paused without sending any notification as\n # it didn't really run in the first place).\n py_db.threads_suspended_single_notification.add_on_resumed_callback(on_resumed)\n self.api.request_resume_thread(thread_id)\n else:\n # Only send resumed notification when it has actually resumed!\n # (otherwise the user could send a continue, receive the notification and then\n # request a new pause which would be paused without sending any notification as\n # it didn't really run in the first place).\n self.api.request_resume_thread(thread_id)\n on_resumed()\n\n def on_next_request(self, py_db, request):\n """\n :param NextRequest request:\n """\n arguments = request.arguments # : :type arguments: NextArguments\n thread_id = arguments.threadId\n\n if py_db.get_use_libraries_filter():\n step_cmd_id = CMD_STEP_OVER_MY_CODE\n else:\n step_cmd_id = CMD_STEP_OVER\n\n self.api.request_step(py_db, thread_id, step_cmd_id)\n\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_stepin_request(self, py_db, request):\n """\n :param StepInRequest request:\n """\n arguments = request.arguments # : :type arguments: StepInArguments\n thread_id = 
arguments.threadId\n\n target_id = arguments.targetId\n if target_id is not None:\n thread = pydevd_find_thread_by_id(thread_id)\n if thread is None:\n response = Response(\n request_seq=request.seq,\n success=False,\n command=request.command,\n message="Unable to find thread from thread_id: %s" % (thread_id,),\n body={},\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n info = set_additional_thread_info(thread)\n target_id_to_smart_step_into_variant = info.target_id_to_smart_step_into_variant\n if not target_id_to_smart_step_into_variant:\n variables_response = pydevd_base_schema.build_response(\n request, kwargs={"success": False, "message": "Unable to step into target (no targets are saved in the thread info)."}\n )\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n variant = target_id_to_smart_step_into_variant.get(target_id)\n if variant is not None:\n parent = variant.parent\n if parent is not None:\n self.api.request_smart_step_into(py_db, request.seq, thread_id, parent.offset, variant.offset)\n else:\n self.api.request_smart_step_into(py_db, request.seq, thread_id, variant.offset, -1)\n else:\n variables_response = pydevd_base_schema.build_response(\n request,\n kwargs={\n "success": False,\n "message": "Unable to find step into target %s. 
Available targets: %s"\n % (target_id, target_id_to_smart_step_into_variant),\n },\n )\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n else:\n if py_db.get_use_libraries_filter():\n step_cmd_id = CMD_STEP_INTO_MY_CODE\n else:\n step_cmd_id = CMD_STEP_INTO\n\n self.api.request_step(py_db, thread_id, step_cmd_id)\n\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_stepintargets_request(self, py_db, request):\n """\n :param StepInTargetsRequest request:\n """\n frame_id = request.arguments.frameId\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(frame_id)\n\n if thread_id is None:\n body = StepInTargetsResponseBody([])\n variables_response = pydevd_base_schema.build_response(\n request,\n kwargs={\n "body": body,\n "success": False,\n "message": "Unable to get thread_id from frame_id (thread to get step in targets seems to have resumed already).",\n },\n )\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n py_db.post_method_as_internal_command(\n thread_id, internal_get_step_in_targets_json, request.seq, thread_id, frame_id, request, set_additional_thread_info\n )\n\n def on_stepout_request(self, py_db, request):\n """\n :param StepOutRequest request:\n """\n arguments = request.arguments # : :type arguments: StepOutArguments\n thread_id = arguments.threadId\n\n if py_db.get_use_libraries_filter():\n step_cmd_id = CMD_STEP_RETURN_MY_CODE\n else:\n step_cmd_id = CMD_STEP_RETURN\n\n self.api.request_step(py_db, thread_id, step_cmd_id)\n\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def _get_hit_condition_expression(self, hit_condition):\n """Following hit condition values are supported\n\n * x or == x when breakpoint is hit x times\n * >= x when breakpoint is hit more than or equal to x times\n * % x when breakpoint is hit multiple of x 
times\n\n Returns '@HIT@ == x' where @HIT@ will be replaced by number of hits\n """\n if not hit_condition:\n return None\n\n expr = hit_condition.strip()\n try:\n int(expr)\n return "@HIT@ == {}".format(expr)\n except ValueError:\n pass\n\n if expr.startswith("%"):\n return "@HIT@ {} == 0".format(expr)\n\n if expr.startswith("==") or expr.startswith(">") or expr.startswith("<"):\n return "@HIT@ {}".format(expr)\n\n return hit_condition\n\n def on_disconnect_request(self, py_db, request):\n """\n :param DisconnectRequest request:\n """\n if request.arguments.terminateDebuggee:\n self._request_terminate_process(py_db)\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n self._launch_or_attach_request_done = False\n py_db.enable_output_redirection(False, False)\n self.api.request_disconnect(py_db, resume_threads=True)\n\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def _verify_launch_or_attach_done(self, request):\n if not self._launch_or_attach_request_done:\n # Note that to validate the breakpoints we need the launch request to be done already\n # (otherwise the filters wouldn't be set for the breakpoint validation).\n if request.command == "setFunctionBreakpoints":\n body = SetFunctionBreakpointsResponseBody([])\n else:\n body = SetBreakpointsResponseBody([])\n response = pydevd_base_schema.build_response(\n request,\n kwargs={"body": body, "success": False, "message": "Breakpoints may only be set after the launch request is received."},\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_setfunctionbreakpoints_request(self, py_db, request):\n """\n :param SetFunctionBreakpointsRequest request:\n """\n response = self._verify_launch_or_attach_done(request)\n if response is not None:\n return response\n\n arguments = request.arguments # : :type arguments: SetFunctionBreakpointsArguments\n 
function_breakpoints = []\n suspend_policy = "ALL" if py_db.multi_threads_single_notification else "NONE"\n\n # Not currently covered by the DAP.\n is_logpoint = False\n expression = None\n\n breakpoints_set = []\n arguments.breakpoints = arguments.breakpoints or []\n for bp in arguments.breakpoints:\n hit_condition = self._get_hit_condition_expression(bp.get("hitCondition"))\n condition = bp.get("condition")\n\n function_breakpoints.append(FunctionBreakpoint(bp["name"], condition, expression, suspend_policy, hit_condition, is_logpoint))\n\n # Note: always succeeds.\n breakpoints_set.append(pydevd_schema.Breakpoint(verified=True, id=self._next_breakpoint_id()).to_dict())\n\n self.api.set_function_breakpoints(py_db, function_breakpoints)\n\n body = {"breakpoints": breakpoints_set}\n set_breakpoints_response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)\n\n def on_setbreakpoints_request(self, py_db, request):\n """\n :param SetBreakpointsRequest request:\n """\n response = self._verify_launch_or_attach_done(request)\n if response is not None:\n return response\n\n arguments = request.arguments # : :type arguments: SetBreakpointsArguments\n # TODO: Path is optional here it could be source reference.\n filename = self.api.filename_to_str(arguments.source.path)\n func_name = "None"\n\n self.api.remove_all_breakpoints(py_db, filename)\n\n btype = "python-line"\n suspend_policy = "ALL" if py_db.multi_threads_single_notification else "NONE"\n\n if not filename.lower().endswith(".py"): # Note: check based on original file, not mapping.\n if self._options.django_debug:\n btype = "django-line"\n elif self._options.flask_debug:\n btype = "jinja2-line"\n\n breakpoints_set = []\n arguments.breakpoints = arguments.breakpoints or []\n for source_breakpoint in arguments.breakpoints:\n source_breakpoint = SourceBreakpoint(**source_breakpoint)\n line = source_breakpoint.line\n condition = 
source_breakpoint.condition\n breakpoint_id = self._next_breakpoint_id()\n\n hit_condition = self._get_hit_condition_expression(source_breakpoint.hitCondition)\n log_message = source_breakpoint.logMessage\n if not log_message:\n is_logpoint = None\n expression = None\n else:\n is_logpoint = True\n expression = convert_dap_log_message_to_expression(log_message)\n\n on_changed_breakpoint_state = partial(self._on_changed_breakpoint_state, py_db, arguments.source)\n result = self.api.add_breakpoint(\n py_db,\n filename,\n btype,\n breakpoint_id,\n line,\n condition,\n func_name,\n expression,\n suspend_policy,\n hit_condition,\n is_logpoint,\n adjust_line=True,\n on_changed_breakpoint_state=on_changed_breakpoint_state,\n )\n\n bp = self._create_breakpoint_from_add_breakpoint_result(py_db, arguments.source, breakpoint_id, result)\n breakpoints_set.append(bp)\n\n body = {"breakpoints": breakpoints_set}\n set_breakpoints_response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)\n\n def _on_changed_breakpoint_state(self, py_db, source, breakpoint_id, result):\n bp = self._create_breakpoint_from_add_breakpoint_result(py_db, source, breakpoint_id, result)\n body = BreakpointEventBody(\n reason="changed",\n breakpoint=bp,\n )\n event = BreakpointEvent(body)\n event_id = 0 # Actually ignored in this case\n py_db.writer.add_command(NetCommand(event_id, 0, event, is_json=True))\n\n def _create_breakpoint_from_add_breakpoint_result(self, py_db, source, breakpoint_id, result):\n error_code = result.error_code\n\n if error_code:\n if error_code == self.api.ADD_BREAKPOINT_FILE_NOT_FOUND:\n error_msg = "Breakpoint in file that does not exist."\n\n elif error_code == self.api.ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS:\n error_msg = "Breakpoint in file excluded by filters."\n if py_db.get_use_libraries_filter():\n error_msg += (\n '\nNote: may be excluded because of "justMyCode" option (default 
== true).'\n 'Try setting "justMyCode": false in the debug configuration (e.g., launch.json).\n'\n )\n\n elif error_code == self.api.ADD_BREAKPOINT_LAZY_VALIDATION:\n error_msg = "Waiting for code to be loaded to verify breakpoint."\n\n elif error_code == self.api.ADD_BREAKPOINT_INVALID_LINE:\n error_msg = "Breakpoint added to invalid line."\n\n else:\n # Shouldn't get here.\n error_msg = "Breakpoint not validated (reason unknown -- please report as bug)."\n\n return pydevd_schema.Breakpoint(\n verified=False, id=breakpoint_id, line=result.translated_line, message=error_msg, source=source\n ).to_dict()\n else:\n return pydevd_schema.Breakpoint(verified=True, id=breakpoint_id, line=result.translated_line, source=source).to_dict()\n\n def on_setexceptionbreakpoints_request(self, py_db, request):\n """\n :param SetExceptionBreakpointsRequest request:\n """\n # : :type arguments: SetExceptionBreakpointsArguments\n arguments = request.arguments\n filters = arguments.filters\n exception_options = arguments.exceptionOptions\n self.api.remove_all_exception_breakpoints(py_db)\n\n # Can't set these in the DAP.\n condition = None\n expression = None\n notify_on_first_raise_only = False\n\n ignore_libraries = 1 if py_db.get_use_libraries_filter() else 0\n\n if exception_options:\n break_raised = False\n break_uncaught = False\n\n for option in exception_options:\n option = ExceptionOptions(**option)\n if not option.path:\n continue\n\n # never: never breaks\n #\n # always: always breaks\n #\n # unhandled: breaks when exception unhandled\n #\n # userUnhandled: breaks if the exception is not handled by user code\n\n notify_on_handled_exceptions = 1 if option.breakMode == "always" else 0\n notify_on_unhandled_exceptions = 1 if option.breakMode == "unhandled" else 0\n notify_on_user_unhandled_exceptions = 1 if option.breakMode == "userUnhandled" else 0\n exception_paths = option.path\n break_raised |= notify_on_handled_exceptions\n break_uncaught |= 
notify_on_unhandled_exceptions\n\n exception_names = []\n if len(exception_paths) == 0:\n continue\n\n elif len(exception_paths) == 1:\n if "Python Exceptions" in exception_paths[0]["names"]:\n exception_names = ["BaseException"]\n\n else:\n path_iterator = iter(exception_paths)\n if "Python Exceptions" in next(path_iterator)["names"]:\n for path in path_iterator:\n for ex_name in path["names"]:\n exception_names.append(ex_name)\n\n for exception_name in exception_names:\n self.api.add_python_exception_breakpoint(\n py_db,\n exception_name,\n condition,\n expression,\n notify_on_handled_exceptions,\n notify_on_unhandled_exceptions,\n notify_on_user_unhandled_exceptions,\n notify_on_first_raise_only,\n ignore_libraries,\n )\n\n else:\n break_raised = "raised" in filters\n break_uncaught = "uncaught" in filters\n break_user = "userUnhandled" in filters\n if break_raised or break_uncaught or break_user:\n notify_on_handled_exceptions = 1 if break_raised else 0\n notify_on_unhandled_exceptions = 1 if break_uncaught else 0\n notify_on_user_unhandled_exceptions = 1 if break_user else 0\n exception = "BaseException"\n\n self.api.add_python_exception_breakpoint(\n py_db,\n exception,\n condition,\n expression,\n notify_on_handled_exceptions,\n notify_on_unhandled_exceptions,\n notify_on_user_unhandled_exceptions,\n notify_on_first_raise_only,\n ignore_libraries,\n )\n\n if break_raised:\n btype = None\n if self._options.django_debug:\n btype = "django"\n elif self._options.flask_debug:\n btype = "jinja2"\n\n if btype:\n self.api.add_plugins_exception_breakpoint(py_db, btype, "BaseException") # Note: Exception name could be anything here.\n\n # Note: no body required on success.\n set_breakpoints_response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)\n\n def on_stacktrace_request(self, py_db, request):\n """\n :param StackTraceRequest request:\n """\n # : :type stack_trace_arguments: 
StackTraceArguments\n stack_trace_arguments = request.arguments\n thread_id = stack_trace_arguments.threadId\n\n if stack_trace_arguments.startFrame:\n start_frame = int(stack_trace_arguments.startFrame)\n else:\n start_frame = 0\n\n if stack_trace_arguments.levels:\n levels = int(stack_trace_arguments.levels)\n else:\n levels = 0\n\n fmt = stack_trace_arguments.format\n if hasattr(fmt, "to_dict"):\n fmt = fmt.to_dict()\n self.api.request_stack(py_db, request.seq, thread_id, fmt=fmt, start_frame=start_frame, levels=levels)\n\n def on_exceptioninfo_request(self, py_db, request):\n """\n :param ExceptionInfoRequest request:\n """\n # : :type exception_into_arguments: ExceptionInfoArguments\n exception_into_arguments = request.arguments\n thread_id = exception_into_arguments.threadId\n max_frames = self._options.max_exception_stack_frames\n thread = pydevd_find_thread_by_id(thread_id)\n if thread is not None:\n self.api.request_exception_info_json(py_db, request, thread_id, thread, max_frames)\n else:\n response = Response(\n request_seq=request.seq,\n success=False,\n command=request.command,\n message="Unable to find thread from thread_id: %s" % (thread_id,),\n body={},\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_scopes_request(self, py_db, request):\n """\n Scopes are the top-level items which appear for a frame (so, we receive the frame id\n and provide the scopes it has).\n\n :param ScopesRequest request:\n """\n frame_id = request.arguments.frameId\n\n variables_reference = frame_id\n scopes = [\n Scope("Locals", ScopeRequest(int(variables_reference), "locals"), False, presentationHint="locals"),\n Scope("Globals", ScopeRequest(int(variables_reference), "globals"), False),\n ]\n body = ScopesResponseBody(scopes)\n scopes_response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, scopes_response, is_json=True)\n\n def on_evaluate_request(self, py_db, request):\n """\n :param 
EvaluateRequest request:\n """\n # : :type arguments: EvaluateArguments\n arguments = request.arguments\n\n if arguments.frameId is None:\n self.api.request_exec_or_evaluate_json(py_db, request, thread_id="*")\n else:\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(arguments.frameId)\n\n if thread_id is not None:\n self.api.request_exec_or_evaluate_json(py_db, request, thread_id)\n else:\n body = EvaluateResponseBody("", 0)\n response = pydevd_base_schema.build_response(\n request, kwargs={"body": body, "success": False, "message": "Unable to find thread for evaluation."}\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_setexpression_request(self, py_db, request):\n # : :type arguments: SetExpressionArguments\n arguments = request.arguments\n\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(arguments.frameId)\n\n if thread_id is not None:\n self.api.request_set_expression_json(py_db, request, thread_id)\n else:\n body = SetExpressionResponseBody("")\n response = pydevd_base_schema.build_response(\n request, kwargs={"body": body, "success": False, "message": "Unable to find thread to set expression."}\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_variables_request(self, py_db, request):\n """\n Variables can be asked whenever some place returned a variables reference (so, it\n can be a scope gotten from on_scopes_request, the result of some evaluation, etc.).\n\n Note that in the DAP the variables reference requires a unique int... 
the way this works for\n pydevd is that an instance is generated for that specific variable reference and we use its\n id(instance) to identify it to make sure all items are unique (and the actual {id->instance}\n is added to a dict which is only valid while the thread is suspended and later cleared when\n the related thread resumes execution).\n\n see: SuspendedFramesManager\n\n :param VariablesRequest request:\n """\n arguments = request.arguments # : :type arguments: VariablesArguments\n variables_reference = arguments.variablesReference\n\n if isinstance(variables_reference, ScopeRequest):\n variables_reference = variables_reference.variable_reference\n\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(variables_reference)\n if thread_id is not None:\n self.api.request_get_variable_json(py_db, request, thread_id)\n else:\n variables = []\n body = VariablesResponseBody(variables)\n variables_response = pydevd_base_schema.build_response(\n request, kwargs={"body": body, "success": False, "message": "Unable to find thread to evaluate variable reference."}\n )\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n def on_setvariable_request(self, py_db, request):\n arguments = request.arguments # : :type arguments: SetVariableArguments\n variables_reference = arguments.variablesReference\n\n if isinstance(variables_reference, ScopeRequest):\n variables_reference = variables_reference.variable_reference\n\n if arguments.name.startswith("(return) "):\n response = pydevd_base_schema.build_response(\n request, kwargs={"body": SetVariableResponseBody(""), "success": False, "message": "Cannot change return value"}\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(variables_reference)\n\n if thread_id is not None:\n self.api.request_change_variable_json(py_db, request, thread_id)\n else:\n response = 
pydevd_base_schema.build_response(\n request,\n kwargs={\n "body": SetVariableResponseBody(""),\n "success": False,\n "message": "Unable to find thread to evaluate variable reference.",\n },\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_modules_request(self, py_db, request):\n modules_manager = py_db.cmd_factory.modules_manager # : :type modules_manager: ModulesManager\n modules_info = modules_manager.get_modules_info()\n body = ModulesResponseBody(modules_info)\n variables_response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)\n\n def on_source_request(self, py_db, request):\n """\n :param SourceRequest request:\n """\n source_reference = request.arguments.sourceReference\n server_filename = None\n content = None\n\n if source_reference != 0:\n server_filename = pydevd_file_utils.get_server_filename_from_source_reference(source_reference)\n if not server_filename:\n server_filename = pydevd_file_utils.get_source_reference_filename_from_linecache(source_reference)\n\n if server_filename:\n # Try direct file access first - it's much faster when available.\n try:\n with open(server_filename, "r") as stream:\n content = stream.read()\n except:\n pass\n\n if content is None:\n # File might not exist at all, or we might not have a permission to read it,\n # but it might also be inside a zipfile, or an IPython cell. 
In this case,\n # linecache might still be able to retrieve the source.\n lines = (linecache.getline(server_filename, i) for i in itertools.count(1))\n lines = itertools.takewhile(bool, lines) # empty lines are '\n', EOF is ''\n\n # If we didn't get at least one line back, reset it to None so that it's\n # reported as error below, and not as an empty file.\n content = "".join(lines) or None\n\n if content is None:\n frame_id = pydevd_file_utils.get_frame_id_from_source_reference(source_reference)\n pydev_log.debug("Found frame id: %s for source reference: %s", frame_id, source_reference)\n if frame_id is not None:\n try:\n content = self.api.get_decompiled_source_from_frame_id(py_db, frame_id)\n except Exception:\n pydev_log.exception("Error getting source for frame id: %s", frame_id)\n content = None\n\n body = SourceResponseBody(content or "")\n response_args = {"body": body}\n\n if content is None:\n if source_reference == 0:\n message = "Source unavailable"\n elif server_filename:\n message = "Unable to retrieve source for %s" % (server_filename,)\n else:\n message = "Invalid sourceReference %d" % (source_reference,)\n response_args.update({"success": False, "message": message})\n\n response = pydevd_base_schema.build_response(request, kwargs=response_args)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_gototargets_request(self, py_db, request):\n path = request.arguments.source.path\n line = request.arguments.line\n target_id = self._goto_targets_map.obtain_key((path, line))\n target = {"id": target_id, "label": "%s:%s" % (path, line), "line": line}\n body = GotoTargetsResponseBody(targets=[target])\n response_args = {"body": body}\n response = pydevd_base_schema.build_response(request, kwargs=response_args)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_goto_request(self, py_db, request):\n target_id = int(request.arguments.targetId)\n thread_id = request.arguments.threadId\n try:\n path, line = 
self._goto_targets_map.obtain_value(target_id)\n except KeyError:\n response = pydevd_base_schema.build_response(\n request,\n kwargs={\n "body": {},\n "success": False,\n "message": "Unknown goto target id: %d" % (target_id,),\n },\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n self.api.request_set_next(py_db, request.seq, thread_id, CMD_SET_NEXT_STATEMENT, path, line, "*")\n # See 'NetCommandFactoryJson.make_set_next_stmnt_status_message' for response\n return None\n\n def on_setdebuggerproperty_request(self, py_db, request):\n args = request.arguments # : :type args: SetDebuggerPropertyArguments\n if args.ideOS is not None:\n self.api.set_ide_os(args.ideOS)\n\n if args.dontTraceStartPatterns is not None and args.dontTraceEndPatterns is not None:\n start_patterns = tuple(args.dontTraceStartPatterns)\n end_patterns = tuple(args.dontTraceEndPatterns)\n self.api.set_dont_trace_start_end_patterns(py_db, start_patterns, end_patterns)\n\n if args.skipSuspendOnBreakpointException is not None:\n py_db.skip_suspend_on_breakpoint_exception = tuple(get_exception_class(x) for x in args.skipSuspendOnBreakpointException)\n\n if args.skipPrintBreakpointException is not None:\n py_db.skip_print_breakpoint_exception = tuple(get_exception_class(x) for x in args.skipPrintBreakpointException)\n\n if args.multiThreadsSingleNotification is not None:\n py_db.multi_threads_single_notification = args.multiThreadsSingleNotification\n\n # TODO: Support other common settings. 
Note that not all of these might be relevant to python.\n # JustMyCodeStepping: 0 or 1\n # AllowOutOfProcessSymbols: 0 or 1\n # DisableJITOptimization: 0 or 1\n # InterpreterOptions: 0 or 1\n # StopOnExceptionCrossingManagedBoundary: 0 or 1\n # WarnIfNoUserCodeOnLaunch: 0 or 1\n # EnableStepFiltering: true of false\n\n response = pydevd_base_schema.build_response(request, kwargs={"body": {}})\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_pydevdsysteminfo_request(self, py_db, request):\n try:\n pid = os.getpid()\n except AttributeError:\n pid = None\n\n # It's possible to have the ppid reported from args. In this case, use that instead of the\n # real ppid (athough we're using `ppid`, what we want in meaning is the `launcher_pid` --\n # so, if a python process is launched from another python process, consider that process the\n # parent and not any intermediary stubs).\n\n ppid = py_db.get_arg_ppid() or self.api.get_ppid()\n\n try:\n impl_desc = platform.python_implementation()\n except AttributeError:\n impl_desc = PY_IMPL_NAME\n\n py_info = pydevd_schema.PydevdPythonInfo(\n version=PY_VERSION_STR,\n implementation=pydevd_schema.PydevdPythonImplementationInfo(\n name=PY_IMPL_NAME,\n version=PY_IMPL_VERSION_STR,\n description=impl_desc,\n ),\n )\n platform_info = pydevd_schema.PydevdPlatformInfo(name=sys.platform)\n process_info = pydevd_schema.PydevdProcessInfo(\n pid=pid,\n ppid=ppid,\n executable=sys.executable,\n bitness=64 if IS_64BIT_PROCESS else 32,\n )\n pydevd_info = pydevd_schema.PydevdInfo(\n usingCython=USING_CYTHON,\n usingFrameEval=USING_FRAME_EVAL,\n )\n body = {\n "python": py_info,\n "platform": platform_info,\n "process": process_info,\n "pydevd": pydevd_info,\n }\n response = pydevd_base_schema.build_response(request, kwargs={"body": body})\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n def on_setpydevdsourcemap_request(self, py_db, request):\n args = request.arguments # : :type args: 
SetPydevdSourceMapArguments\n SourceMappingEntry = self.api.SourceMappingEntry\n\n path = args.source.path\n source_maps = args.pydevdSourceMaps\n # : :type source_map: PydevdSourceMap\n new_mappings = [\n SourceMappingEntry(\n source_map["line"],\n source_map["endLine"],\n source_map["runtimeLine"],\n self.api.filename_to_str(source_map["runtimeSource"]["path"]),\n )\n for source_map in source_maps\n ]\n\n error_msg = self.api.set_source_mapping(py_db, path, new_mappings)\n if error_msg:\n response = pydevd_base_schema.build_response(\n request,\n kwargs={\n "body": {},\n "success": False,\n "message": error_msg,\n },\n )\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n\n response = pydevd_base_schema.build_response(request)\n return NetCommand(CMD_RETURN, 0, response, is_json=True)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_process_net_command_json.py
|
pydevd_process_net_command_json.py
|
Python
| 58,893 | 0.75 | 0.154357 | 0.066784 |
react-lib
| 714 |
2024-03-05T20:32:29.890195
|
BSD-3-Clause
| false |
704320f5c60815d1403334104ba33128
|
import sys\nfrom _pydevd_bundle import pydevd_xml\nfrom os.path import basename\nfrom _pydev_bundle import pydev_log\nfrom urllib.parse import unquote_plus\nfrom _pydevd_bundle.pydevd_constants import IS_PY311_OR_GREATER\n\n\n# ===================================================================================================\n# print_var_node\n# ===================================================================================================\ndef print_var_node(xml_node, stream):\n name = xml_node.getAttribute("name")\n value = xml_node.getAttribute("value")\n val_type = xml_node.getAttribute("type")\n\n found_as = xml_node.getAttribute("found_as")\n stream.write("Name: ")\n stream.write(unquote_plus(name))\n stream.write(", Value: ")\n stream.write(unquote_plus(value))\n stream.write(", Type: ")\n stream.write(unquote_plus(val_type))\n if found_as:\n stream.write(", Found as: %s" % (unquote_plus(found_as),))\n stream.write("\n")\n\n\n# ===================================================================================================\n# print_referrers\n# ===================================================================================================\ndef print_referrers(obj, stream=None):\n if stream is None:\n stream = sys.stdout\n result = get_referrer_info(obj)\n from xml.dom.minidom import parseString\n\n dom = parseString(result)\n\n xml = dom.getElementsByTagName("xml")[0]\n for node in xml.childNodes:\n if node.nodeType == node.TEXT_NODE:\n continue\n\n if node.localName == "for":\n stream.write("Searching references for: ")\n for child in node.childNodes:\n if child.nodeType == node.TEXT_NODE:\n continue\n print_var_node(child, stream)\n\n elif node.localName == "var":\n stream.write("Referrer found: ")\n print_var_node(node, stream)\n\n else:\n sys.stderr.write("Unhandled node: %s\n" % (node,))\n\n return result\n\n\n# ===================================================================================================\n# get_referrer_info\n# 
===================================================================================================\ndef get_referrer_info(searched_obj):\n DEBUG = 0\n if DEBUG:\n sys.stderr.write("Getting referrers info.\n")\n try:\n try:\n if searched_obj is None:\n ret = ["<xml>\n"]\n\n ret.append("<for>\n")\n ret.append(\n pydevd_xml.var_to_xml(\n searched_obj, "Skipping getting referrers for None", additional_in_xml=' id="%s"' % (id(searched_obj),)\n )\n )\n ret.append("</for>\n")\n ret.append("</xml>")\n ret = "".join(ret)\n return ret\n\n obj_id = id(searched_obj)\n\n try:\n if DEBUG:\n sys.stderr.write("Getting referrers...\n")\n import gc\n\n referrers = gc.get_referrers(searched_obj)\n except:\n pydev_log.exception()\n ret = ["<xml>\n"]\n\n ret.append("<for>\n")\n ret.append(\n pydevd_xml.var_to_xml(\n searched_obj, "Exception raised while trying to get_referrers.", additional_in_xml=' id="%s"' % (id(searched_obj),)\n )\n )\n ret.append("</for>\n")\n ret.append("</xml>")\n ret = "".join(ret)\n return ret\n\n if DEBUG:\n sys.stderr.write("Found %s referrers.\n" % (len(referrers),))\n\n curr_frame = sys._getframe()\n frame_type = type(curr_frame)\n\n # Ignore this frame and any caller frame of this frame\n\n ignore_frames = {} # Should be a set, but it's not available on all python versions.\n while curr_frame is not None:\n if basename(curr_frame.f_code.co_filename).startswith("pydev"):\n ignore_frames[curr_frame] = 1\n curr_frame = curr_frame.f_back\n\n ret = ["<xml>\n"]\n\n ret.append("<for>\n")\n if DEBUG:\n sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))\n\n ret.append(pydevd_xml.var_to_xml(searched_obj, 'Referrers of obj with id="%s"' % (obj_id,)))\n ret.append("</for>\n")\n\n curr_frame = sys._getframe()\n all_objects = None\n\n for r in referrers:\n try:\n if r in ignore_frames:\n continue # Skip the references we may add ourselves\n except:\n pass # Ok: unhashable type checked...\n\n if r is referrers:\n continue\n\n if r is 
curr_frame.f_locals:\n continue\n\n r_type = type(r)\n r_id = str(id(r))\n\n representation = str(r_type)\n\n found_as = ""\n if r_type == frame_type:\n if DEBUG:\n sys.stderr.write("Found frame referrer: %r\n" % (r,))\n for key, val in r.f_locals.items():\n if val is searched_obj:\n found_as = key\n break\n\n elif r_type == dict:\n if DEBUG:\n sys.stderr.write("Found dict referrer: %r\n" % (r,))\n\n # Try to check if it's a value in the dict (and under which key it was found)\n for key, val in r.items():\n if val is searched_obj:\n found_as = key\n if DEBUG:\n sys.stderr.write(" Found as %r in dict\n" % (found_as,))\n break\n\n # Ok, there's one annoying thing: many times we find it in a dict from an instance,\n # but with this we don't directly have the class, only the dict, so, to workaround that\n # we iterate over all reachable objects ad check if one of those has the given dict.\n if all_objects is None:\n all_objects = gc.get_objects()\n\n for x in all_objects:\n try:\n if getattr(x, "__dict__", None) is r:\n r = x\n r_type = type(x)\n r_id = str(id(r))\n representation = str(r_type)\n break\n except:\n pass # Just ignore any error here (i.e.: ReferenceError, etc.)\n\n elif r_type in (tuple, list):\n if DEBUG:\n sys.stderr.write("Found tuple referrer: %r\n" % (r,))\n\n for i, x in enumerate(r):\n if x is searched_obj:\n found_as = "%s[%s]" % (r_type.__name__, i)\n if DEBUG:\n sys.stderr.write(" Found as %s in tuple: \n" % (found_as,))\n break\n\n elif IS_PY311_OR_GREATER:\n # Up to Python 3.10, gc.get_referrers for an instance actually returned the\n # object.__dict__, but on Python 3.11 it returns the actual object, so,\n # handling is a bit easier (we don't need the workaround from the dict\n # case to find the actual instance, we just need to find the attribute name).\n if DEBUG:\n sys.stderr.write("Found dict referrer: %r\n" % (r,))\n\n dct = getattr(r, "__dict__", None)\n if dct:\n # Try to check if it's a value in the dict (and under which key it was 
found)\n for key, val in dct.items():\n if val is searched_obj:\n found_as = key\n if DEBUG:\n sys.stderr.write(" Found as %r in object instance\n" % (found_as,))\n break\n\n if found_as:\n if not isinstance(found_as, str):\n found_as = str(found_as)\n found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)\n\n ret.append(pydevd_xml.var_to_xml(r, representation, additional_in_xml=' id="%s"%s' % (r_id, found_as)))\n finally:\n if DEBUG:\n sys.stderr.write("Done searching for references.\n")\n\n # If we have any exceptions, don't keep dangling references from this frame to any of our objects.\n all_objects = None\n referrers = None\n searched_obj = None\n r = None\n x = None\n key = None\n val = None\n curr_frame = None\n ignore_frames = None\n except:\n pydev_log.exception()\n ret = ["<xml>\n"]\n\n ret.append("<for>\n")\n ret.append(pydevd_xml.var_to_xml(searched_obj, "Error getting referrers for:", additional_in_xml=' id="%s"' % (id(searched_obj),)))\n ret.append("</for>\n")\n ret.append("</xml>")\n ret = "".join(ret)\n return ret\n\n ret.append("</xml>")\n ret = "".join(ret)\n return ret\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_referrers.py
|
pydevd_referrers.py
|
Python
| 9,930 | 0.95 | 0.269841 | 0.096618 |
awesome-app
| 667 |
2023-10-24T19:02:06.906779
|
Apache-2.0
| false |
5cace416c081bc2819f8da5281f2bf93
|
"""\nBased on the python xreload.\n\nChanges\n======================\n\n1. we don't recreate the old namespace from new classes. Rather, we keep the existing namespace,\nload a new version of it and update only some of the things we can inplace. That way, we don't break\nthings such as singletons or end up with a second representation of the same class in memory.\n\n2. If we find it to be a __metaclass__, we try to update it as a regular class.\n\n3. We don't remove old attributes (and leave them lying around even if they're no longer used).\n\n4. Reload hooks were changed\n\nThese changes make it more stable, especially in the common case (where in a debug session only the\ncontents of a function are changed), besides providing flexibility for users that want to extend\non it.\n\n\n\nHooks\n======================\n\nClasses/modules can be specially crafted to work with the reload (so that it can, for instance,\nupdate some constant which was changed).\n\n1. To participate in the change of some attribute:\n\n In a module:\n\n __xreload_old_new__(namespace, name, old, new)\n\n in a class:\n\n @classmethod\n __xreload_old_new__(cls, name, old, new)\n\n A class or module may include a method called '__xreload_old_new__' which is called when we're\n unable to reload a given attribute.\n\n\n\n2. 
To do something after the whole reload is finished:\n\n In a module:\n\n __xreload_after_reload_update__(namespace):\n\n In a class:\n\n @classmethod\n __xreload_after_reload_update__(cls):\n\n\n A class or module may include a method called '__xreload_after_reload_update__' which is called\n after the reload finishes.\n\n\nImportant: when providing a hook, always use the namespace or cls provided and not anything in the global\nnamespace, as the global namespace are only temporarily created during the reload and may not reflect the\nactual application state (while the cls and namespace passed are).\n\n\nCurrent limitations\n======================\n\n\n- Attributes/constants are added, but not changed (so singletons and the application state is not\n broken -- use provided hooks to workaround it).\n\n- Code using metaclasses may not always work.\n\n- Functions and methods using decorators (other than classmethod and staticmethod) are not handled\n correctly.\n\n- Renamings are not handled correctly.\n\n- Dependent modules are not reloaded.\n\n- New __slots__ can't be added to existing classes.\n\n\nInfo\n======================\n\nOriginal: http://svn.python.org/projects/sandbox/trunk/xreload/xreload.py\nNote: it seems https://github.com/plone/plone.reload/blob/master/plone/reload/xreload.py enhances it (to check later)\n\nInteresting alternative: https://code.google.com/p/reimport/\n\nAlternative to reload().\n\nThis works by executing the module in a scratch namespace, and then patching classes, methods and\nfunctions in place. This avoids the need to patch instances. 
New objects are copied into the\ntarget namespace.\n\n"""\n\nfrom _pydev_bundle.pydev_imports import execfile\nfrom _pydevd_bundle import pydevd_dont_trace\nimport types\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_bundle.pydevd_constants import get_global_debugger\n\nNO_DEBUG = 0\nLEVEL1 = 1\nLEVEL2 = 2\n\nDEBUG = NO_DEBUG\n\n\ndef write_err(*args):\n py_db = get_global_debugger()\n if py_db is not None:\n new_lst = []\n for a in args:\n new_lst.append(str(a))\n\n msg = " ".join(new_lst)\n s = "code reload: %s\n" % (msg,)\n cmd = py_db.cmd_factory.make_io_message(s, 2)\n if py_db.writer is not None:\n py_db.writer.add_command(cmd)\n\n\ndef notify_info0(*args):\n write_err(*args)\n\n\ndef notify_info(*args):\n if DEBUG >= LEVEL1:\n write_err(*args)\n\n\ndef notify_info2(*args):\n if DEBUG >= LEVEL2:\n write_err(*args)\n\n\ndef notify_error(*args):\n write_err(*args)\n\n\n# =======================================================================================================================\n# code_objects_equal\n# =======================================================================================================================\ndef code_objects_equal(code0, code1):\n for d in dir(code0):\n if d.startswith("_") or "line" in d or d in ("replace", "co_positions", "co_qualname"):\n continue\n if getattr(code0, d) != getattr(code1, d):\n return False\n return True\n\n\n# =======================================================================================================================\n# xreload\n# =======================================================================================================================\ndef xreload(mod):\n """Reload a module in place, updating classes, methods and functions.\n\n mod: a module object\n\n Returns a boolean indicating whether a change was done.\n """\n r = Reload(mod)\n r.apply()\n found_change = r.found_change\n r = None\n pydevd_dont_trace.clear_trace_filter_cache()\n return found_change\n\n\n# This isn't 
actually used... Initially I planned to reload variables which are immutable on the\n# namespace, but this can destroy places where we're saving state, which may not be what we want,\n# so, we're being conservative and giving the user hooks if he wants to do a reload.\n#\n# immutable_types = [int, str, float, tuple] #That should be common to all Python versions\n#\n# for name in 'long basestr unicode frozenset'.split():\n# try:\n# immutable_types.append(__builtins__[name])\n# except:\n# pass #Just ignore: not all python versions are created equal.\n# immutable_types = tuple(immutable_types)\n\n\n# =======================================================================================================================\n# Reload\n# =======================================================================================================================\nclass Reload:\n def __init__(self, mod, mod_name=None, mod_filename=None):\n self.mod = mod\n if mod_name:\n self.mod_name = mod_name\n else:\n self.mod_name = mod.__name__ if mod is not None else None\n\n if mod_filename:\n self.mod_filename = mod_filename\n else:\n self.mod_filename = mod.__file__ if mod is not None else None\n\n self.found_change = False\n\n def apply(self):\n mod = self.mod\n self._on_finish_callbacks = []\n try:\n # Get the module namespace (dict) early; this is part of the type check\n modns = mod.__dict__\n\n # Execute the code. We copy the module dict to a temporary; then\n # clear the module dict; then execute the new code in the module\n # dict; then swap things back and around. 
This trick (due to\n # Glyph Lefkowitz) ensures that the (readonly) __globals__\n # attribute of methods and functions is set to the correct dict\n # object.\n new_namespace = modns.copy()\n new_namespace.clear()\n if self.mod_filename:\n new_namespace["__file__"] = self.mod_filename\n try:\n new_namespace["__builtins__"] = __builtins__\n except NameError:\n raise # Ok if not there.\n\n if self.mod_name:\n new_namespace["__name__"] = self.mod_name\n if new_namespace["__name__"] == "__main__":\n # We do this because usually the __main__ starts-up the program, guarded by\n # the if __name__ == '__main__', but we don't want to start the program again\n # on a reload.\n new_namespace["__name__"] = "__main_reloaded__"\n\n execfile(self.mod_filename, new_namespace, new_namespace)\n # Now we get to the hard part\n oldnames = set(modns)\n newnames = set(new_namespace)\n\n # Create new tokens (note: not deleting existing)\n for name in newnames - oldnames:\n notify_info0("Added:", name, "to namespace")\n self.found_change = True\n modns[name] = new_namespace[name]\n\n # Update in-place what we can\n for name in oldnames & newnames:\n self._update(modns, name, modns[name], new_namespace[name])\n\n self._handle_namespace(modns)\n\n for c in self._on_finish_callbacks:\n c()\n del self._on_finish_callbacks[:]\n except:\n pydev_log.exception()\n\n def _handle_namespace(self, namespace, is_class_namespace=False):\n on_finish = None\n if is_class_namespace:\n xreload_after_update = getattr(namespace, "__xreload_after_reload_update__", None)\n if xreload_after_update is not None:\n self.found_change = True\n on_finish = lambda: xreload_after_update()\n\n elif "__xreload_after_reload_update__" in namespace:\n xreload_after_update = namespace["__xreload_after_reload_update__"]\n self.found_change = True\n on_finish = lambda: xreload_after_update(namespace)\n\n if on_finish is not None:\n # If a client wants to know about it, give him a chance.\n 
self._on_finish_callbacks.append(on_finish)\n\n def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):\n """Update oldobj, if possible in place, with newobj.\n\n If oldobj is immutable, this simply returns newobj.\n\n Args:\n oldobj: the object to be updated\n newobj: the object used as the source for the update\n """\n try:\n notify_info2("Updating: ", oldobj)\n if oldobj is newobj:\n # Probably something imported\n return\n\n if type(oldobj) is not type(newobj):\n # Cop-out: if the type changed, give up\n if name not in ("__builtins__",):\n notify_error("Type of: %s (old: %s != new: %s) changed... Skipping." % (name, type(oldobj), type(newobj)))\n return\n\n if isinstance(newobj, types.FunctionType):\n self._update_function(oldobj, newobj)\n return\n\n if isinstance(newobj, types.MethodType):\n self._update_method(oldobj, newobj)\n return\n\n if isinstance(newobj, classmethod):\n self._update_classmethod(oldobj, newobj)\n return\n\n if isinstance(newobj, staticmethod):\n self._update_staticmethod(oldobj, newobj)\n return\n\n if hasattr(types, "ClassType"):\n classtype = (types.ClassType, type) # object is not instance of types.ClassType.\n else:\n classtype = type\n\n if isinstance(newobj, classtype):\n self._update_class(oldobj, newobj)\n return\n\n # New: dealing with metaclasses.\n if hasattr(newobj, "__metaclass__") and hasattr(newobj, "__class__") and newobj.__metaclass__ == newobj.__class__:\n self._update_class(oldobj, newobj)\n return\n\n if namespace is not None:\n # Check for the `__xreload_old_new__` protocol (don't even compare things\n # as even doing a comparison may break things -- see: https://github.com/microsoft/debugpy/issues/615).\n xreload_old_new = None\n if is_class_namespace:\n xreload_old_new = getattr(namespace, "__xreload_old_new__", None)\n if xreload_old_new is not None:\n self.found_change = True\n xreload_old_new(name, oldobj, newobj)\n\n elif "__xreload_old_new__" in namespace:\n xreload_old_new = 
namespace["__xreload_old_new__"]\n xreload_old_new(namespace, name, oldobj, newobj)\n self.found_change = True\n\n # Too much information to the user...\n # else:\n # notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))\n\n except:\n notify_error("Exception found when updating %s. Proceeding for other items." % (name,))\n pydev_log.exception()\n\n # All of the following functions have the same signature as _update()\n\n def _update_function(self, oldfunc, newfunc):\n """Update a function object."""\n oldfunc.__doc__ = newfunc.__doc__\n oldfunc.__dict__.update(newfunc.__dict__)\n\n try:\n newfunc.__code__\n attr_name = "__code__"\n except AttributeError:\n newfunc.func_code\n attr_name = "func_code"\n\n old_code = getattr(oldfunc, attr_name)\n new_code = getattr(newfunc, attr_name)\n if not code_objects_equal(old_code, new_code):\n notify_info0("Updated function code:", oldfunc)\n setattr(oldfunc, attr_name, new_code)\n self.found_change = True\n\n try:\n oldfunc.__defaults__ = newfunc.__defaults__\n except AttributeError:\n oldfunc.func_defaults = newfunc.func_defaults\n\n return oldfunc\n\n def _update_method(self, oldmeth, newmeth):\n """Update a method object."""\n # XXX What if im_func is not a function?\n if hasattr(oldmeth, "im_func") and hasattr(newmeth, "im_func"):\n self._update(None, None, oldmeth.im_func, newmeth.im_func)\n elif hasattr(oldmeth, "__func__") and hasattr(newmeth, "__func__"):\n self._update(None, None, oldmeth.__func__, newmeth.__func__)\n return oldmeth\n\n def _update_class(self, oldclass, newclass):\n """Update a class object."""\n olddict = oldclass.__dict__\n newdict = newclass.__dict__\n\n oldnames = set(olddict)\n newnames = set(newdict)\n\n for name in newnames - oldnames:\n setattr(oldclass, name, newdict[name])\n notify_info0("Added:", name, "to", oldclass)\n self.found_change = True\n\n # Note: not removing old things...\n # for name in oldnames - newnames:\n # 
notify_info('Removed:', name, 'from', oldclass)\n # delattr(oldclass, name)\n\n for name in (oldnames & newnames) - set(["__dict__", "__doc__"]):\n self._update(oldclass, name, olddict[name], newdict[name], is_class_namespace=True)\n\n old_bases = getattr(oldclass, "__bases__", None)\n new_bases = getattr(newclass, "__bases__", None)\n if str(old_bases) != str(new_bases):\n notify_error("Changing the hierarchy of a class is not supported. %s may be inconsistent." % (oldclass,))\n\n self._handle_namespace(oldclass, is_class_namespace=True)\n\n def _update_classmethod(self, oldcm, newcm):\n """Update a classmethod update."""\n # While we can't modify the classmethod object itself (it has no\n # mutable attributes), we *can* extract the underlying function\n # (by calling __get__(), which returns a method object) and update\n # it in-place. We don't have the class available to pass to\n # __get__() but any object except None will do.\n self._update(None, None, oldcm.__get__(0), newcm.__get__(0))\n\n def _update_staticmethod(self, oldsm, newsm):\n """Update a staticmethod update."""\n # While we can't modify the staticmethod object itself (it has no\n # mutable attributes), we *can* extract the underlying function\n # (by calling __get__(), which returns it) and update it in-place.\n # We don't have the class available to pass to __get__() but any\n # object except None will do.\n self._update(None, None, oldsm.__get__(0), newsm.__get__(0))\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_reload.py
|
pydevd_reload.py
|
Python
| 16,212 | 0.95 | 0.2194 | 0.18323 |
awesome-app
| 566 |
2024-05-02T19:44:58.655189
|
MIT
| false |
930f42555091b913f7c2b8d014268d3a
|
from _pydev_bundle import pydev_log\nfrom _pydevd_bundle.pydevd_utils import hasattr_checked, DAPGrouper, Timer\nfrom io import StringIO\nimport traceback\nfrom os.path import basename\n\nfrom functools import partial\nfrom _pydevd_bundle.pydevd_constants import (\n IS_PY36_OR_GREATER,\n MethodWrapperType,\n RETURN_VALUES_DICT,\n DebugInfoHolder,\n IS_PYPY,\n GENERATED_LEN_ATTR_NAME,\n)\nfrom _pydevd_bundle.pydevd_safe_repr import SafeRepr\nfrom _pydevd_bundle import pydevd_constants\n\nTOO_LARGE_MSG = "Maximum number of items (%s) reached. To show more items customize the value of the PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS environment variable."\nTOO_LARGE_ATTR = "Unable to handle:"\n\n\n# =======================================================================================================================\n# UnableToResolveVariableException\n# =======================================================================================================================\nclass UnableToResolveVariableException(Exception):\n pass\n\n\ntry:\n from collections import OrderedDict\nexcept:\n OrderedDict = dict\n\ntry:\n import java.lang # @UnresolvedImport\nexcept:\n pass\n\n# =======================================================================================================================\n# See: pydevd_extension_api module for resolver interface\n# =======================================================================================================================\n\n\ndef sorted_attributes_key(attr_name):\n if attr_name.startswith("__"):\n if attr_name.endswith("__"):\n # __ double under before and after __\n return (3, attr_name)\n else:\n # __ double under before\n return (2, attr_name)\n elif attr_name.startswith("_"):\n # _ single under\n return (1, attr_name)\n else:\n # Regular (Before anything)\n return (0, attr_name)\n\n\n# =======================================================================================================================\n# 
DefaultResolver\n# =======================================================================================================================\nclass DefaultResolver:\n """\n DefaultResolver is the class that'll actually resolve how to show some variable.\n """\n\n def resolve(self, var, attribute):\n return getattr(var, attribute)\n\n def get_contents_debug_adapter_protocol(self, obj, fmt=None):\n if MethodWrapperType:\n dct, used___dict__ = self._get_py_dictionary(obj)\n else:\n dct = self._get_jy_dictionary(obj)[0]\n\n lst = sorted(dct.items(), key=lambda tup: sorted_attributes_key(tup[0]))\n if used___dict__:\n eval_name = ".__dict__[%s]"\n else:\n eval_name = ".%s"\n\n ret = []\n for attr_name, attr_value in lst:\n entry = (attr_name, attr_value, eval_name % attr_name)\n ret.append(entry)\n\n return ret\n\n def get_dictionary(self, var, names=None, used___dict__=False):\n if MethodWrapperType:\n return self._get_py_dictionary(var, names, used___dict__=used___dict__)[0]\n else:\n return self._get_jy_dictionary(var)[0]\n\n def _get_jy_dictionary(self, obj):\n ret = {}\n found = java.util.HashMap()\n\n original = obj\n if hasattr_checked(obj, "__class__") and obj.__class__ == java.lang.Class:\n # get info about superclasses\n classes = []\n classes.append(obj)\n c = obj.getSuperclass()\n while c != None:\n classes.append(c)\n c = c.getSuperclass()\n\n # get info about interfaces\n interfs = []\n for obj in classes:\n interfs.extend(obj.getInterfaces())\n classes.extend(interfs)\n\n # now is the time when we actually get info on the declared methods and fields\n for obj in classes:\n declaredMethods = obj.getDeclaredMethods()\n declaredFields = obj.getDeclaredFields()\n for i in range(len(declaredMethods)):\n name = declaredMethods[i].getName()\n ret[name] = declaredMethods[i].toString()\n found.put(name, 1)\n\n for i in range(len(declaredFields)):\n name = declaredFields[i].getName()\n found.put(name, 1)\n # if declaredFields[i].isAccessible():\n 
declaredFields[i].setAccessible(True)\n # ret[name] = declaredFields[i].get( declaredFields[i] )\n try:\n ret[name] = declaredFields[i].get(original)\n except:\n ret[name] = declaredFields[i].toString()\n\n # this simple dir does not always get all the info, that's why we have the part before\n # (e.g.: if we do a dir on String, some methods that are from other interfaces such as\n # charAt don't appear)\n try:\n d = dir(original)\n for name in d:\n if found.get(name) != 1:\n ret[name] = getattr(original, name)\n except:\n # sometimes we're unable to do a dir\n pass\n\n return ret\n\n def get_names(self, var):\n used___dict__ = False\n try:\n names = dir(var)\n except Exception:\n names = []\n if not names:\n if hasattr_checked(var, "__dict__"):\n names = list(var.__dict__)\n used___dict__ = True\n return names, used___dict__\n\n def _get_py_dictionary(self, var, names=None, used___dict__=False):\n """\n :return tuple(names, used___dict__), where used___dict__ means we have to access\n using obj.__dict__[name] instead of getattr(obj, name)\n """\n\n # On PyPy we never show functions. This is because of a corner case where PyPy becomes\n # absurdly slow -- it takes almost half a second to introspect a single numpy function (so,\n # the related test, "test_case_16_resolve_numpy_array", times out... 
this probably isn't\n # specific to numpy, but to any library where the CPython bridge is used, but as we\n # can't be sure in the debugger, we play it safe and don't show it at all).\n filter_function = IS_PYPY\n\n if not names:\n names, used___dict__ = self.get_names(var)\n d = {}\n\n # Be aware that the order in which the filters are applied attempts to\n # optimize the operation by removing as many items as possible in the\n # first filters, leaving fewer items for later filters\n\n timer = Timer()\n cls = type(var)\n for name in names:\n try:\n name_as_str = name\n if name_as_str.__class__ != str:\n name_as_str = "%r" % (name_as_str,)\n\n if not used___dict__:\n attr = getattr(var, name)\n else:\n attr = var.__dict__[name]\n\n # filter functions?\n if filter_function:\n if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):\n continue\n except:\n # if some error occurs getting it, let's put it to the user.\n strIO = StringIO()\n traceback.print_exc(file=strIO)\n attr = strIO.getvalue()\n\n finally:\n timer.report_if_getting_attr_slow(cls, name_as_str)\n\n d[name_as_str] = attr\n\n return d, used___dict__\n\n\nclass DAPGrouperResolver:\n def get_contents_debug_adapter_protocol(self, obj, fmt=None):\n return obj.get_contents_debug_adapter_protocol()\n\n\n_basic_immutable_types = (int, float, complex, str, bytes, type(None), bool, frozenset)\n\n\ndef _does_obj_repr_evaluate_to_obj(obj):\n """\n If obj is an object where evaluating its representation leads to\n the same object, return True, otherwise, return False.\n """\n try:\n if isinstance(obj, tuple):\n for o in obj:\n if not _does_obj_repr_evaluate_to_obj(o):\n return False\n return True\n else:\n return isinstance(obj, _basic_immutable_types)\n except:\n return False\n\n\n# =======================================================================================================================\n# DictResolver\n# 
=======================================================================================================================\nclass DictResolver:\n sort_keys = not IS_PY36_OR_GREATER\n\n def resolve(self, dct, key):\n if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):\n return None\n\n if "(" not in key:\n # we have to treat that because the dict resolver is also used to directly resolve the global and local\n # scopes (which already have the items directly)\n try:\n return dct[key]\n except:\n return getattr(dct, key)\n\n # ok, we have to iterate over the items to find the one that matches the id, because that's the only way\n # to actually find the reference from the string we have before.\n expected_id = int(key.split("(")[-1][:-1])\n for key, val in dct.items():\n if id(key) == expected_id:\n return val\n\n raise UnableToResolveVariableException()\n\n def key_to_str(self, key, fmt=None):\n if fmt is not None:\n if fmt.get("hex", False):\n safe_repr = SafeRepr()\n safe_repr.convert_to_hex = True\n return safe_repr(key)\n return "%r" % (key,)\n\n def init_dict(self):\n return {}\n\n def get_contents_debug_adapter_protocol(self, dct, fmt=None):\n """\n This method is to be used in the case where the variables are all saved by its id (and as\n such don't need to have the `resolve` method called later on, so, keys don't need to\n embed the reference in the key).\n\n Note that the return should be ordered.\n\n :return list(tuple(name:str, value:object, evaluateName:str))\n """\n ret = []\n\n i = 0\n\n found_representations = set()\n\n for key, val in dct.items():\n i += 1\n key_as_str = self.key_to_str(key, fmt)\n\n if key_as_str not in found_representations:\n found_representations.add(key_as_str)\n else:\n # If the key would be a duplicate, add the key id (otherwise\n # VSCode won't show all keys correctly).\n # See: https://github.com/microsoft/debugpy/issues/148\n key_as_str = "%s (id: %s)" % (key_as_str, id(key))\n found_representations.add(key_as_str)\n\n if 
_does_obj_repr_evaluate_to_obj(key):\n s = self.key_to_str(key) # do not format the key\n eval_key_str = "[%s]" % (s,)\n else:\n eval_key_str = None\n ret.append((key_as_str, val, eval_key_str))\n if i >= pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS:\n ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG % (pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS,), None))\n break\n\n # in case the class extends built-in type and has some additional fields\n from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(dct, fmt)\n\n if from_default_resolver:\n ret = from_default_resolver + ret\n\n if self.sort_keys:\n ret = sorted(ret, key=lambda tup: sorted_attributes_key(tup[0]))\n\n ret.append((GENERATED_LEN_ATTR_NAME, len(dct), partial(_apply_evaluate_name, evaluate_name="len(%s)")))\n return ret\n\n def get_dictionary(self, dct):\n ret = self.init_dict()\n\n i = 0\n for key, val in dct.items():\n i += 1\n # we need to add the id because otherwise we cannot find the real object to get its contents later on.\n key = "%s (%s)" % (self.key_to_str(key), id(key))\n ret[key] = val\n if i >= pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS:\n ret[TOO_LARGE_ATTR] = TOO_LARGE_MSG % (pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS,)\n break\n\n # in case if the class extends built-in type and has some additional fields\n additional_fields = defaultResolver.get_dictionary(dct)\n ret.update(additional_fields)\n ret[GENERATED_LEN_ATTR_NAME] = len(dct)\n return ret\n\n\ndef _apply_evaluate_name(parent_name, evaluate_name):\n return evaluate_name % (parent_name,)\n\n\nclass MoreItemsRange:\n def __init__(self, value, from_i, to_i):\n self.value = value\n self.from_i = from_i\n self.to_i = to_i\n\n def get_contents_debug_adapter_protocol(self, _self, fmt=None):\n l = len(self.value)\n ret = []\n\n format_str = "%0" + str(int(len(str(l - 1)))) + "d"\n if fmt is not None and fmt.get("hex", False):\n format_str = "0x%0" + 
str(int(len(hex(l).lstrip("0x")))) + "x"\n\n for i, item in enumerate(self.value[self.from_i : self.to_i]):\n i += self.from_i\n ret.append((format_str % i, item, "[%s]" % i))\n return ret\n\n def get_dictionary(self, _self, fmt=None):\n dct = {}\n for key, obj, _ in self.get_contents_debug_adapter_protocol(self, fmt):\n dct[key] = obj\n return dct\n\n def resolve(self, attribute):\n """\n :param var: that's the original object we're dealing with.\n :param attribute: that's the key to resolve\n -- either the dict key in get_dictionary or the name in the dap protocol.\n """\n return self.value[int(attribute)]\n\n def __eq__(self, o):\n return isinstance(o, MoreItemsRange) and self.value is o.value and self.from_i == o.from_i and self.to_i == o.to_i\n\n def __str__(self):\n return "[%s:%s]" % (self.from_i, self.to_i)\n\n __repr__ = __str__\n\n\nclass MoreItems:\n def __init__(self, value, handled_items):\n self.value = value\n self.handled_items = handled_items\n\n def get_contents_debug_adapter_protocol(self, _self, fmt=None):\n total_items = len(self.value)\n remaining = total_items - self.handled_items\n bucket_size = pydevd_constants.PYDEVD_CONTAINER_BUCKET_SIZE\n\n from_i = self.handled_items\n to_i = from_i + min(bucket_size, remaining)\n\n ret = []\n while remaining > 0:\n remaining -= bucket_size\n more_items_range = MoreItemsRange(self.value, from_i, to_i)\n ret.append((str(more_items_range), more_items_range, None))\n\n from_i = to_i\n to_i = from_i + min(bucket_size, remaining)\n\n return ret\n\n def get_dictionary(self, _self, fmt=None):\n dct = {}\n for key, obj, _ in self.get_contents_debug_adapter_protocol(self, fmt):\n dct[key] = obj\n return dct\n\n def resolve(self, attribute):\n from_i, to_i = attribute[1:-1].split(":")\n from_i = int(from_i)\n to_i = int(to_i)\n return MoreItemsRange(self.value, from_i, to_i)\n\n def __eq__(self, o):\n return isinstance(o, MoreItems) and self.value is o.value\n\n def __str__(self):\n return "..."\n\n __repr__ = 
__str__\n\n\nclass ForwardInternalResolverToObject:\n """\n To be used when we provide some internal object that'll actually do the resolution.\n """\n\n def get_contents_debug_adapter_protocol(self, obj, fmt=None):\n return obj.get_contents_debug_adapter_protocol(fmt)\n\n def get_dictionary(self, var, fmt={}):\n return var.get_dictionary(var, fmt)\n\n def resolve(self, var, attribute):\n return var.resolve(attribute)\n\n\nclass TupleResolver: # to enumerate tuples and lists\n def resolve(self, var, attribute):\n """\n :param var: that's the original object we're dealing with.\n :param attribute: that's the key to resolve\n -- either the dict key in get_dictionary or the name in the dap protocol.\n """\n if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):\n return None\n try:\n return var[int(attribute)]\n except:\n if attribute == "more":\n return MoreItems(var, pydevd_constants.PYDEVD_CONTAINER_INITIAL_EXPANDED_ITEMS)\n\n return getattr(var, attribute)\n\n def get_contents_debug_adapter_protocol(self, lst, fmt=None):\n """\n This method is to be used in the case where the variables are all saved by its id (and as\n such don't need to have the `resolve` method called later on, so, keys don't need to\n embed the reference in the key).\n\n Note that the return should be ordered.\n\n :return list(tuple(name:str, value:object, evaluateName:str))\n """\n lst_len = len(lst)\n ret = []\n\n format_str = "%0" + str(int(len(str(lst_len - 1)))) + "d"\n if fmt is not None and fmt.get("hex", False):\n format_str = "0x%0" + str(int(len(hex(lst_len).lstrip("0x")))) + "x"\n\n initial_expanded = pydevd_constants.PYDEVD_CONTAINER_INITIAL_EXPANDED_ITEMS\n for i, item in enumerate(lst):\n ret.append((format_str % i, item, "[%s]" % i))\n\n if i >= initial_expanded - 1:\n if (lst_len - initial_expanded) < pydevd_constants.PYDEVD_CONTAINER_BUCKET_SIZE:\n # Special case: if we have just 1 more bucket just put it inline.\n item = MoreItemsRange(lst, initial_expanded, lst_len)\n\n 
else:\n # Multiple buckets\n item = MoreItems(lst, initial_expanded)\n ret.append(("more", item, None))\n break\n\n # Needed in case the class extends the built-in type and has some additional fields.\n from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)\n if from_default_resolver:\n ret = from_default_resolver + ret\n\n ret.append((GENERATED_LEN_ATTR_NAME, len(lst), partial(_apply_evaluate_name, evaluate_name="len(%s)")))\n return ret\n\n def get_dictionary(self, var, fmt={}):\n l = len(var)\n d = {}\n\n format_str = "%0" + str(int(len(str(l - 1)))) + "d"\n if fmt is not None and fmt.get("hex", False):\n format_str = "0x%0" + str(int(len(hex(l).lstrip("0x")))) + "x"\n\n initial_expanded = pydevd_constants.PYDEVD_CONTAINER_INITIAL_EXPANDED_ITEMS\n for i, item in enumerate(var):\n d[format_str % i] = item\n\n if i >= initial_expanded - 1:\n item = MoreItems(var, initial_expanded)\n d["more"] = item\n break\n\n # in case if the class extends built-in type and has some additional fields\n additional_fields = defaultResolver.get_dictionary(var)\n d.update(additional_fields)\n d[GENERATED_LEN_ATTR_NAME] = len(var)\n return d\n\n\n# =======================================================================================================================\n# SetResolver\n# =======================================================================================================================\nclass SetResolver:\n """\n Resolves a set as dict id(object)->object\n """\n\n def get_contents_debug_adapter_protocol(self, obj, fmt=None):\n ret = []\n\n for i, item in enumerate(obj):\n ret.append((str(id(item)), item, None))\n\n if i >= pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS:\n ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG % (pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS,), None))\n break\n\n # Needed in case the class extends the built-in type and has some additional fields.\n from_default_resolver = 
defaultResolver.get_contents_debug_adapter_protocol(obj, fmt=fmt)\n if from_default_resolver:\n ret = from_default_resolver + ret\n ret.append((GENERATED_LEN_ATTR_NAME, len(obj), partial(_apply_evaluate_name, evaluate_name="len(%s)")))\n return ret\n\n def resolve(self, var, attribute):\n if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):\n return None\n\n try:\n attribute = int(attribute)\n except:\n return getattr(var, attribute)\n\n for v in var:\n if id(v) == attribute:\n return v\n\n raise UnableToResolveVariableException("Unable to resolve %s in %s" % (attribute, var))\n\n def get_dictionary(self, var):\n d = {}\n for i, item in enumerate(var):\n d[str(id(item))] = item\n\n if i >= pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS:\n d[TOO_LARGE_ATTR] = TOO_LARGE_MSG % (pydevd_constants.PYDEVD_CONTAINER_RANDOM_ACCESS_MAX_ITEMS,)\n break\n\n # in case if the class extends built-in type and has some additional fields\n additional_fields = defaultResolver.get_dictionary(var)\n d.update(additional_fields)\n d[GENERATED_LEN_ATTR_NAME] = len(var)\n return d\n\n def change_var_from_name(self, container, name, new_value):\n # The name given in this case must be the id(item), so, we can actually\n # iterate in the set and see which item matches the given id.\n\n try:\n # Check that the new value can actually be added to a set (i.e.: it's hashable/comparable).\n set().add(new_value)\n except:\n return None\n\n for item in container:\n if str(id(item)) == name:\n container.remove(item)\n container.add(new_value)\n return str(id(new_value))\n\n return None\n\n\n# =======================================================================================================================\n# InstanceResolver\n# =======================================================================================================================\nclass InstanceResolver:\n def resolve(self, var, attribute):\n field = var.__class__.getDeclaredField(attribute)\n 
field.setAccessible(True)\n return field.get(var)\n\n def get_dictionary(self, obj):\n ret = {}\n\n declaredFields = obj.__class__.getDeclaredFields()\n for i in range(len(declaredFields)):\n name = declaredFields[i].getName()\n try:\n declaredFields[i].setAccessible(True)\n ret[name] = declaredFields[i].get(obj)\n except:\n pydev_log.exception()\n\n return ret\n\n\n# =======================================================================================================================\n# JyArrayResolver\n# =======================================================================================================================\nclass JyArrayResolver:\n """\n This resolves a regular Object[] array from java\n """\n\n def resolve(self, var, attribute):\n if attribute == GENERATED_LEN_ATTR_NAME:\n return None\n return var[int(attribute)]\n\n def get_dictionary(self, obj):\n ret = {}\n\n for i in range(len(obj)):\n ret[i] = obj[i]\n\n ret[GENERATED_LEN_ATTR_NAME] = len(obj)\n return ret\n\n\n# =======================================================================================================================\n# MultiValueDictResolver\n# =======================================================================================================================\nclass MultiValueDictResolver(DictResolver):\n def resolve(self, dct, key):\n if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):\n return None\n\n # ok, we have to iterate over the items to find the one that matches the id, because that's the only way\n # to actually find the reference from the string we have before.\n expected_id = int(key.split("(")[-1][:-1])\n for key in list(dct.keys()):\n val = dct.getlist(key)\n if id(key) == expected_id:\n return val\n\n raise UnableToResolveVariableException()\n\n\n# =======================================================================================================================\n# DjangoFormResolver\n# 
=======================================================================================================================\nclass DjangoFormResolver(DefaultResolver):\n def get_dictionary(self, var, names=None):\n # Do not call self.errors because it is a property and has side effects.\n names, used___dict__ = self.get_names(var)\n\n has_errors_attr = False\n if "errors" in names:\n has_errors_attr = True\n names.remove("errors")\n\n d = defaultResolver.get_dictionary(var, names=names, used___dict__=used___dict__)\n if has_errors_attr:\n try:\n errors_attr = getattr(var, "_errors")\n except:\n errors_attr = None\n d["errors"] = errors_attr\n return d\n\n\n# =======================================================================================================================\n# DequeResolver\n# =======================================================================================================================\nclass DequeResolver(TupleResolver):\n def get_dictionary(self, var):\n d = TupleResolver.get_dictionary(self, var)\n d["maxlen"] = getattr(var, "maxlen", None)\n return d\n\n\n# =======================================================================================================================\n# OrderedDictResolver\n# =======================================================================================================================\nclass OrderedDictResolver(DictResolver):\n sort_keys = False\n\n def init_dict(self):\n return OrderedDict()\n\n\n# =======================================================================================================================\n# FrameResolver\n# =======================================================================================================================\nclass FrameResolver:\n """\n This resolves a frame.\n """\n\n def resolve(self, obj, attribute):\n if attribute == "__internals__":\n return defaultResolver.get_dictionary(obj)\n\n if attribute == "stack":\n return self.get_frame_stack(obj)\n\n if 
attribute == "f_locals":\n return obj.f_locals\n\n return None\n\n def get_dictionary(self, obj):\n ret = {}\n ret["__internals__"] = defaultResolver.get_dictionary(obj)\n ret["stack"] = self.get_frame_stack(obj)\n ret["f_locals"] = obj.f_locals\n return ret\n\n def get_frame_stack(self, frame):\n ret = []\n if frame is not None:\n ret.append(self.get_frame_name(frame))\n\n while frame.f_back:\n frame = frame.f_back\n ret.append(self.get_frame_name(frame))\n\n return ret\n\n def get_frame_name(self, frame):\n if frame is None:\n return "None"\n try:\n name = basename(frame.f_code.co_filename)\n return "frame: %s [%s:%s] id:%s" % (frame.f_code.co_name, name, frame.f_lineno, id(frame))\n except:\n return "frame object"\n\n\ndefaultResolver = DefaultResolver()\ndictResolver = DictResolver()\ntupleResolver = TupleResolver()\ninstanceResolver = InstanceResolver()\njyArrayResolver = JyArrayResolver()\nsetResolver = SetResolver()\nmultiValueDictResolver = MultiValueDictResolver()\ndjangoFormResolver = DjangoFormResolver()\ndequeResolver = DequeResolver()\norderedDictResolver = OrderedDictResolver()\nframeResolver = FrameResolver()\ndapGrouperResolver = DAPGrouperResolver()\nforwardInternalResolverToObject = ForwardInternalResolverToObject()\n\n\nclass InspectStub:\n def isbuiltin(self, _args):\n return False\n\n def isroutine(self, object):\n return False\n\n\ntry:\n import inspect\nexcept:\n inspect = InspectStub()\n\n\ndef get_var_scope(attr_name, attr_value, evaluate_name, handle_return_values):\n if attr_name.startswith("'"):\n if attr_name.endswith("'"):\n # i.e.: strings denote that it is a regular value in some container.\n return ""\n else:\n i = attr_name.find("__' (")\n if i >= 0:\n # Handle attr_name such as: >>'__name__' (1732494379184)<<\n attr_name = attr_name[1 : i + 2]\n\n if handle_return_values and attr_name == RETURN_VALUES_DICT:\n return ""\n\n elif attr_name == GENERATED_LEN_ATTR_NAME:\n return ""\n\n if attr_name.startswith("__") and 
attr_name.endswith("__"):\n return DAPGrouper.SCOPE_SPECIAL_VARS\n\n if attr_name.startswith("_") or attr_name.endswith("__"):\n return DAPGrouper.SCOPE_PROTECTED_VARS\n\n try:\n if inspect.isroutine(attr_value) or isinstance(attr_value, MethodWrapperType):\n return DAPGrouper.SCOPE_FUNCTION_VARS\n\n elif inspect.isclass(attr_value):\n return DAPGrouper.SCOPE_CLASS_VARS\n except:\n # It's possible that isinstance throws an exception when dealing with user-code.\n if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:\n pydev_log.exception()\n\n return ""\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_resolver.py
|
pydevd_resolver.py
|
Python
| 30,451 | 0.95 | 0.226506 | 0.129032 |
node-utils
| 811 |
2024-07-21T14:23:43.733485
|
Apache-2.0
| false |
625724f6e61ab304550ed11c8651534c
|
"""\nUtility for saving locals.\n"""\nimport sys\nfrom _pydevd_bundle.pydevd_constants import IS_PY313_OR_GREATER\nfrom _pydev_bundle import pydev_log\n\ntry:\n import types\n\n frame_type = types.FrameType\nexcept:\n frame_type = type(sys._getframe())\n\n\ndef is_save_locals_available():\n return save_locals_impl is not None\n\n\ndef save_locals(frame):\n """\n Copy values from locals_dict into the fast stack slots in the given frame.\n\n Note: the 'save_locals' branch had a different approach wrapping the frame (much more code, but it gives ideas\n on how to save things partially, not the 'whole' locals).\n """\n if not isinstance(frame, frame_type):\n # Fix exception when changing Django variable (receiving DjangoTemplateFrame)\n return\n\n if save_locals_impl is not None:\n try:\n save_locals_impl(frame)\n except:\n pass\n\n\ndef make_save_locals_impl():\n """\n Factory for the 'save_locals_impl' method. This may seem like a complicated pattern but it is essential that the method is created at\n module load time. 
Inner imports after module load time would cause an occasional debugger deadlock due to the importer lock and debugger\n lock being taken in different order in different threads.\n """\n try:\n if "__pypy__" in sys.builtin_module_names:\n import __pypy__ # @UnresolvedImport\n\n save_locals = __pypy__.locals_to_fast\n except:\n pass\n else:\n if "__pypy__" in sys.builtin_module_names:\n\n def save_locals_pypy_impl(frame):\n save_locals(frame)\n\n return save_locals_pypy_impl\n\n if IS_PY313_OR_GREATER:\n # No longer needed in Python 3.13 (deprecated)\n # See PEP 667\n return None\n\n try:\n import ctypes\n\n locals_to_fast = ctypes.pythonapi.PyFrame_LocalsToFast\n except:\n pass\n else:\n\n def save_locals_ctypes_impl(frame):\n locals_to_fast(ctypes.py_object(frame), ctypes.c_int(0))\n\n return save_locals_ctypes_impl\n\n return None\n\n\nsave_locals_impl = make_save_locals_impl()\n\n_SENTINEL = [] # Any mutable will do.\n\n\ndef update_globals_and_locals(updated_globals, initial_globals, frame):\n # We don't have the locals and passed all in globals, so, we have to\n # manually choose how to update the variables.\n #\n # Note that the current implementation is a bit tricky: it does work in general\n # but if we do something as 'some_var = 10' and 'some_var' is already defined to have\n # the value '10' in the globals, we won't actually put that value in the locals\n # (which means that the frame locals won't be updated).\n # Still, the approach to have a single namespace was chosen because it was the only\n # one that enabled creating and using variables during the same evaluation.\n assert updated_globals is not None\n f_locals = None\n\n removed = set(initial_globals).difference(updated_globals)\n\n for key, val in updated_globals.items():\n if val is not initial_globals.get(key, _SENTINEL):\n if f_locals is None:\n # Note: we call f_locals only once because each time\n # we call it the values may be reset.\n f_locals = frame.f_locals\n\n f_locals[key] = val\n\n 
if removed:\n if f_locals is None:\n # Note: we call f_locals only once because each time\n # we call it the values may be reset.\n f_locals = frame.f_locals\n\n for key in removed:\n try:\n del f_locals[key]\n except Exception:\n # Python 3.13.0 has issues here:\n # https://github.com/python/cpython/pull/125616\n # This should be backported from the pull request\n # but we still need to handle it in this version\n try:\n if key in f_locals:\n f_locals[key] = None\n except Exception as e:\n pydev_log.info("Unable to remove key: %s from locals. Exception: %s", key, e)\n\n if f_locals is not None:\n save_locals(frame)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_save_locals.py
|
pydevd_save_locals.py
|
Python
| 4,290 | 0.95 | 0.215385 | 0.2 |
awesome-app
| 293 |
2025-06-27T17:58:05.761249
|
Apache-2.0
| false |
bf5c4f80157253d2d3d4ee235c49d456
|
# Builds call/return "signatures" (argument names + inferred runtime types) for
# frames in the user's project and ships them to the IDE as XML net commands.
from _pydev_bundle import pydev_log

try:
    import trace
except ImportError:
    pass
else:
    trace._warn = lambda *args: None  # workaround for http://bugs.python.org/issue17143 (PY-8706)

import os
from _pydevd_bundle.pydevd_comm import CMD_SIGNATURE_CALL_TRACE, NetCommand
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_utils import get_clsname_for_code


class Signature(object):
    """Holds one call signature: source file, function name, (name, type) args
    and, optionally, the return type filled in later."""

    def __init__(self, file, name):
        self.file = file
        self.name = name
        self.args = []  # list of (arg_name, type_name) tuples
        self.args_str = []  # parallel list of "name:type" strings (used by __str__)
        self.return_type = None  # set by send_signature_return_trace

    def add_arg(self, name, type):
        """Append one (name, type) pair, keeping args/args_str in sync."""
        self.args.append((name, type))
        self.args_str.append("%s:%s" % (name, type))

    def set_args(self, frame, recursive=False):
        """Infer types for the frame's positional arguments from its locals.

        :param frame: the frame whose co_varnames/f_locals are inspected.
        :param recursive: when True, container element types are included too.
        """
        self.args = []

        code = frame.f_code
        locals = frame.f_locals

        # Only the declared positional arguments (first co_argcount varnames).
        for i in range(0, code.co_argcount):
            name = code.co_varnames[i]
            class_name = get_type_of_value(locals[name], recursive=recursive)

            self.add_arg(name, class_name)

    def __str__(self):
        return "%s %s(%s)" % (self.file, self.name, ", ".join(self.args_str))


def get_type_of_value(value, ignore_module_name=("__main__", "__builtin__", "builtins"), recursive=False):
    """Return a display type name for ``value`` (e.g. ``pkg.Cls``, ``List[int]``).

    Module prefixes in ``ignore_module_name`` are omitted; when ``recursive`` is
    True, element types of list/dict/tuple are rendered too (first element /
    first key-value pair only for list/dict).
    """
    tp = type(value)
    class_name = tp.__name__
    if class_name == "instance":  # old-style classes
        tp = value.__class__
        class_name = tp.__name__

    if hasattr(tp, "__module__") and tp.__module__ and tp.__module__ not in ignore_module_name:
        class_name = "%s.%s" % (tp.__module__, class_name)

    if class_name == "list":
        class_name = "List"
        if len(value) > 0 and recursive:
            # Only the first element is sampled for the element type.
            class_name += "[%s]" % get_type_of_value(value[0], recursive=recursive)
        return class_name

    if class_name == "dict":
        class_name = "Dict"
        if len(value) > 0 and recursive:
            # Only the first key/value pair is sampled (note the break).
            for k, v in value.items():
                class_name += "[%s, %s]" % (get_type_of_value(k, recursive=recursive), get_type_of_value(v, recursive=recursive))
                break
        return class_name

    if class_name == "tuple":
        class_name = "Tuple"
        if len(value) > 0 and recursive:
            # Tuples render every element's type (heterogeneous by nature).
            class_name += "["
            class_name += ", ".join(get_type_of_value(v, recursive=recursive) for v in value)
            class_name += "]"

    return class_name


def _modname(path):
    """Return a plausible module name for the path"""
    base = os.path.basename(path)
    filename, ext = os.path.splitext(base)
    return filename


class SignatureFactory(object):
    """Creates Signature objects for frames, caching code->class-name lookups
    and already-sent signatures."""

    def __init__(self):
        self._caller_cache = {}  # code object -> class name (or None when not a method)
        self.cache = CallSignatureCache()

    def create_signature(self, frame, filename, with_args=True):
        """Build a Signature for ``frame``; returns None on any failure
        (errors are logged, never propagated to the tracing machinery)."""
        try:
            _, modulename, funcname = self.file_module_function_of(frame)
            signature = Signature(filename, funcname)
            if with_args:
                signature.set_args(frame, recursive=True)
            return signature
        except:
            pydev_log.exception()

    def file_module_function_of(self, frame):  # this code is take from trace module and fixed to work with new-style classes
        """Return (filename, modulename, funcname) for ``frame``; funcname is
        qualified as ``Class.method`` when the frame belongs to a method."""
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = _modname(filename)
        else:
            modulename = None

        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            clsname = get_clsname_for_code(code, frame)
            if clsname is not None:
                # cache the result - assumption is that new.* is
                # not called later to disturb this relationship
                # _caller_cache could be flushed if functions in
                # the new module get called.
                self._caller_cache[code] = clsname

        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)

        return filename, modulename, funcname


def get_signature_info(signature):
    """Return the (file, name, space-joined-arg-types) key used for caching."""
    return signature.file, signature.name, " ".join([arg[1] for arg in signature.args])


def get_frame_info(frame):
    """Return (function name, current line, filename) of ``frame``."""
    co = frame.f_code
    return co.co_name, frame.f_lineno, co.co_filename


class CallSignatureCache(object):
    """Nested dict cache (file -> name -> arg-types) of signatures already sent,
    so identical signatures are not resent to the IDE."""

    def __init__(self):
        self.cache = {}

    def add(self, signature):
        filename, name, args_type = get_signature_info(signature)
        calls_from_file = self.cache.setdefault(filename, {})
        name_calls = calls_from_file.setdefault(name, {})
        name_calls[args_type] = None  # dict used as a set; value is irrelevant

    def is_in_cache(self, signature):
        filename, name, args_type = get_signature_info(signature)
        if args_type in self.cache.get(filename, {}).get(name, {}):
            return True
        return False


def create_signature_message(signature):
    """Serialize ``signature`` to the pydevd XML wire format and wrap it in a
    CMD_SIGNATURE_CALL_TRACE NetCommand."""
    cmdTextList = ["<xml>"]

    cmdTextList.append(
        '<call_signature file="%s" name="%s">'
        % (pydevd_xml.make_valid_xml_value(signature.file), pydevd_xml.make_valid_xml_value(signature.name))
    )

    for arg in signature.args:
        cmdTextList.append(
            '<arg name="%s" type="%s"></arg>' % (pydevd_xml.make_valid_xml_value(arg[0]), pydevd_xml.make_valid_xml_value(arg[1]))
        )

    if signature.return_type is not None:
        cmdTextList.append('<return type="%s"></return>' % (pydevd_xml.make_valid_xml_value(signature.return_type)))

    cmdTextList.append("</call_signature></xml>")
    cmdText = "".join(cmdTextList)
    return NetCommand(CMD_SIGNATURE_CALL_TRACE, 0, cmdText)


def send_signature_call_trace(dbg, frame, filename):
    """Send a call signature for ``frame`` if signatures are enabled, the frame
    is in project scope and this exact signature wasn't sent before.

    :return bool: True when a command was actually sent.
    """
    if dbg.signature_factory and dbg.in_project_scope(frame):
        signature = dbg.signature_factory.create_signature(frame, filename)
        if signature is not None:
            if dbg.signature_factory.cache is not None:
                if not dbg.signature_factory.cache.is_in_cache(signature):
                    dbg.signature_factory.cache.add(signature)
                    dbg.writer.add_command(create_signature_message(signature))
                    return True
                else:
                    # we don't send signature if it is cached
                    return False
            else:
                dbg.writer.add_command(create_signature_message(signature))
                return True
    return False


def send_signature_return_trace(dbg, frame, filename, return_value):
    """Send a return-type signature (no args) for ``frame``; always sent when
    enabled — return signatures are not deduplicated by the cache."""
    if dbg.signature_factory and dbg.in_project_scope(frame):
        signature = dbg.signature_factory.create_signature(frame, filename, with_args=False)
        signature.return_type = get_type_of_value(return_value, recursive=True)
        dbg.writer.add_command(create_signature_message(signature))
        return True

    return False
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_signature.py
|
pydevd_signature.py
|
Python
| 7,078 | 0.95 | 0.258706 | 0.032051 |
awesome-app
| 277 |
2024-04-20T10:09:36.594752
|
Apache-2.0
| false |
af6d5960e488f63d14b6f3b5d0dae238
|
# Maps lines in real files (file.py) to lines in runtime-generated sources
# (e.g. <ipython-cell-xxx>) and back, so breakpoints set on a file hit the
# dynamically-compiled code that actually runs.
import bisect
from _pydevd_bundle.pydevd_constants import NULL, KeyifyList
import pydevd_file_utils


class SourceMappingEntry(object):
    """One mapping: lines [line, end_line] of a source file correspond to lines
    starting at runtime_line of runtime_source (something as <ipython-cell-xxx>)."""

    __slots__ = ["source_filename", "line", "end_line", "runtime_line", "runtime_source"]

    def __init__(self, line, end_line, runtime_line, runtime_source):
        assert isinstance(runtime_source, str)

        self.line = int(line)
        self.end_line = int(end_line)
        self.runtime_line = int(runtime_line)
        self.runtime_source = runtime_source  # Something as <ipython-cell-xxx>

        # Should be set after translated to server (absolute_source_filename).
        # This is what's sent to the client afterwards (so, its case should not be normalized).
        self.source_filename = None

    def contains_line(self, i):
        # Inclusive range check on the client (file) side.
        return self.line <= i <= self.end_line

    def contains_runtime_line(self, i):
        # NOTE(review): line_count is end_line + line, not end_line - line, so
        # the runtime range is wider than the mapped span — looks intentional
        # only as a permissive match; confirm against upstream before changing.
        line_count = self.end_line + self.line
        runtime_end_line = self.runtime_line + line_count
        return self.runtime_line <= i <= runtime_end_line

    def __str__(self):
        return "SourceMappingEntry(%s)" % (", ".join("%s=%r" % (attr, getattr(self, attr)) for attr in self.__slots__))

    __repr__ = __str__


class SourceMapping(object):
    """Bidirectional registry of SourceMappingEntry objects with a lookup cache.

    Client side = real files on disk; server side = runtime sources executed by
    the interpreter. The cache is cleared whenever mappings change.
    """

    def __init__(self, on_source_mapping_changed=NULL):
        self._mappings_to_server = {}  # dict(normalized(file.py) to [SourceMappingEntry])
        self._mappings_to_client = {}  # dict(<cell> to File.py)
        self._cache = {}  # memoizes map_to_client / has_mapping_entry lookups
        self._on_source_mapping_changed = on_source_mapping_changed

    def set_source_mapping(self, absolute_filename, mapping):
        """
        :param str absolute_filename:
            The filename for the source mapping (bytes on py2 and str on py3).

        :param list(SourceMappingEntry) mapping:
            A list with the source mapping entries to be applied to the given filename.

        :return str:
            An error message if it was not possible to set the mapping or an empty string if
            everything is ok.
        """
        # Let's first validate if it's ok to apply that mapping.
        # File mappings must be 1:N, not M:N (i.e.: if there's a mapping from file1.py to <cell1>,
        # there can be no other mapping from any other file to <cell1>).
        # This is a limitation to make it easier to remove existing breakpoints when new breakpoints are
        # set to a file (so, any file matching that breakpoint can be removed instead of needing to check
        # which lines are corresponding to that file).
        for map_entry in mapping:
            existing_source_filename = self._mappings_to_client.get(map_entry.runtime_source)
            if existing_source_filename and existing_source_filename != absolute_filename:
                return "Cannot apply mapping from %s to %s (it conflicts with mapping: %s to %s)" % (
                    absolute_filename,
                    map_entry.runtime_source,
                    existing_source_filename,
                    map_entry.runtime_source,
                )

        try:
            absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)
            # Drop reverse entries of the mapping being replaced before installing the new one.
            current_mapping = self._mappings_to_server.get(absolute_normalized_filename, [])
            for map_entry in current_mapping:
                del self._mappings_to_client[map_entry.runtime_source]

            # Sorted by start line so map_to_server can bisect.
            self._mappings_to_server[absolute_normalized_filename] = sorted(mapping, key=lambda entry: entry.line)

            for map_entry in mapping:
                self._mappings_to_client[map_entry.runtime_source] = absolute_filename
        finally:
            # Even on failure the cache/listeners must be refreshed.
            self._cache.clear()
            self._on_source_mapping_changed()
        return ""

    def map_to_client(self, runtime_source_filename, lineno):
        """Translate (runtime source, line) -> (client file, line, translated?).

        Returns the input unchanged (with False) when no entry matches.
        """
        key = (lineno, "client", runtime_source_filename)
        try:
            return self._cache[key]
        except KeyError:
            for _, mapping in list(self._mappings_to_server.items()):
                for map_entry in mapping:
                    if map_entry.runtime_source == runtime_source_filename:  # <cell1>
                        if map_entry.contains_runtime_line(lineno):  # matches line range
                            self._cache[key] = (map_entry.source_filename, map_entry.line + (lineno - map_entry.runtime_line), True)
                            return self._cache[key]

            self._cache[key] = (runtime_source_filename, lineno, False)  # Mark that no translation happened in the cache.
            return self._cache[key]

    def has_mapping_entry(self, runtime_source_filename):
        """
        :param runtime_source_filename:
            Something as <ipython-cell-xxx>
        """
        # Note that we're not interested in the line here, just on knowing if a given filename
        # (from the server) has a mapping for it.
        key = ("has_entry", runtime_source_filename)
        try:
            return self._cache[key]
        except KeyError:
            for _absolute_normalized_filename, mapping in list(self._mappings_to_server.items()):
                for map_entry in mapping:
                    if map_entry.runtime_source == runtime_source_filename:
                        self._cache[key] = True
                        return self._cache[key]

            self._cache[key] = False
            return self._cache[key]

    def map_to_server(self, absolute_filename, lineno):
        """
        Convert something as 'file1.py' at line 10 to '<ipython-cell-xxx>' at line 2.

        Note that the name should be already normalized at this point.

        :return tuple(filename, lineno, changed): input is echoed back with
            changed=False when no mapping entry covers the line.
        """
        absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)

        changed = False
        mappings = self._mappings_to_server.get(absolute_normalized_filename)
        if mappings:
            # Entries are sorted by .line, so bisect finds the insertion point;
            # the candidate entry is the one just before it (or the first one).
            i = bisect.bisect(KeyifyList(mappings, lambda entry: entry.line), lineno)
            if i >= len(mappings):
                i -= 1

            if i == 0:
                entry = mappings[i]

            else:
                entry = mappings[i - 1]

            if not entry.contains_line(lineno):
                # Fall back to the entry at the insertion point itself.
                entry = mappings[i]
                if not entry.contains_line(lineno):
                    entry = None

            if entry is not None:
                lineno = entry.runtime_line + (lineno - entry.line)

                absolute_filename = entry.runtime_source
                changed = True

        return absolute_filename, lineno, changed
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_source_mapping.py
|
pydevd_source_mapping.py
|
Python
| 6,643 | 0.95 | 0.256579 | 0.081967 |
node-utils
| 715 |
2024-09-16T06:38:11.022089
|
Apache-2.0
| false |
4e62ec9f9e9fb2f0dad327f541821549
|
# Thread suspend/resume helpers shared by the pydevd core: locating threads by
# debugger id and flipping their additional-info state between RUN and SUSPEND.
from _pydevd_bundle import pydevd_utils
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_comm_constants import CMD_STEP_INTO, CMD_THREAD_SUSPEND
from _pydevd_bundle.pydevd_constants import PYTHON_SUSPEND, STATE_SUSPEND, get_thread_id, STATE_RUN, PYDEVD_USE_SYS_MONITORING
from _pydev_bundle._pydev_saved_modules import threading
from _pydev_bundle import pydev_log
import sys
from _pydevd_sys_monitoring import pydevd_sys_monitoring


def pydevd_find_thread_by_id(thread_id):
    """Return the live threading.Thread whose pydevd id matches ``thread_id``
    (exact match or a ``...|tid`` suffix match), or None if not found."""
    try:
        threads = threading.enumerate()
        for i in threads:
            tid = get_thread_id(i)
            if thread_id == tid or thread_id.endswith("|" + tid):
                return i

        # This can happen when a request comes for a thread which was previously removed.
        pydev_log.info("Could not find thread %s.", thread_id)
        pydev_log.info("Available: %s.", ([get_thread_id(t) for t in threads],))
    except:
        pydev_log.exception()

    return None


def mark_thread_suspended(thread, stop_reason: int, original_step_cmd: int = -1):
    """Flag ``thread``'s additional info as suspended and return that info.

    :param stop_reason: why the thread stopped; overridden by
        ``original_step_cmd`` when the latter is given (!= -1).
    """
    info = set_additional_thread_info(thread)
    info.suspend_type = PYTHON_SUSPEND
    if original_step_cmd != -1:
        stop_reason = original_step_cmd
    thread.stop_reason = stop_reason

    # Note: don't set the 'pydev_original_step_cmd' here if unset.

    if info.pydev_step_cmd == -1:
        # If the step command is not specified, set it to step into
        # to make sure it'll break as soon as possible.
        info.pydev_step_cmd = CMD_STEP_INTO
        info.pydev_step_stop = None

    # Mark as suspended as the last thing.
    info.pydev_state = STATE_SUSPEND
    info.update_stepping_info()
    return info


def internal_run_thread(thread, set_additional_thread_info):
    """Clear all stepping state on ``thread`` and mark it as running."""
    info = set_additional_thread_info(thread)
    info.pydev_original_step_cmd = -1
    info.pydev_step_cmd = -1
    info.pydev_step_stop = None
    info.pydev_state = STATE_RUN
    info.update_stepping_info()


def resume_threads(thread_id, except_thread=None):
    """Resume threads addressed by ``thread_id``.

    ``"*"`` resumes all non-pydevd threads; a ``__frame__:`` id (tasklets) is
    rejected with a log message; otherwise the single matching thread is resumed.
    ``except_thread`` is always skipped.
    """
    pydev_log.info("Resuming threads: %s (except thread: %s)", thread_id, except_thread)
    threads = []
    if thread_id == "*":
        threads = pydevd_utils.get_non_pydevd_threads()

    elif thread_id.startswith("__frame__:"):
        pydev_log.critical("Can't make tasklet run: %s", thread_id)

    else:
        threads = [pydevd_find_thread_by_id(thread_id)]

    for t in threads:
        if t is None or t is except_thread:
            pydev_log.info("Skipped resuming thread: %s", t)
            continue

        internal_run_thread(t, set_additional_thread_info=set_additional_thread_info)


def suspend_all_threads(py_db, except_thread):
    """
    Suspend all except the one passed as a parameter.
    :param except_thread:
        The thread to be left running (typically the one that hit the break).
    """
    if PYDEVD_USE_SYS_MONITORING:
        # Make sure sys.monitoring callbacks fire so the suspension is observed.
        pydevd_sys_monitoring.update_monitor_events(suspend_requested=True)

    pydev_log.info("Suspending all threads except: %s", except_thread)
    all_threads = pydevd_utils.get_non_pydevd_threads()
    for t in all_threads:
        if getattr(t, "pydev_do_not_trace", None):
            pass  # skip some other threads, i.e. ipython history saving thread from debug console
        else:
            if t is except_thread:
                continue
            info = mark_thread_suspended(t, CMD_THREAD_SUSPEND)
            frame = info.get_topmost_frame(t)

            # Reset the tracing as in this case as it could've set scopes to be untraced.
            if frame is not None:
                try:
                    py_db.set_trace_for_frame_and_parents(t.ident, frame)
                finally:
                    frame = None  # drop the frame reference promptly

    if PYDEVD_USE_SYS_MONITORING:
        # After suspending the frames we need the monitoring to be reset.
        pydevd_sys_monitoring.restart_events()
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_thread_lifecycle.py
|
pydevd_thread_lifecycle.py
|
Python
| 3,960 | 0.95 | 0.216981 | 0.083333 |
python-kit
| 48 |
2023-08-08T22:29:39.488426
|
GPL-3.0
| false |
276f56d9e0ffa9d11d78459f583c21c5
|
# Timeout infrastructure: a single daemon thread services many timeout handles
# (cheaper than one threading.Timer per timeout) and fires their callbacks.
from _pydev_bundle._pydev_saved_modules import ThreadingEvent, ThreadingLock, threading_current_thread
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread
from _pydevd_bundle.pydevd_constants import thread_get_ident, IS_CPYTHON, NULL
import ctypes
import time
from _pydev_bundle import pydev_log
import weakref
from _pydevd_bundle.pydevd_utils import is_current_thread_main_thread
from _pydevd_bundle import pydevd_utils

_DEBUG = False  # Default should be False as this can be very verbose.


class _TimeoutThread(PyDBDaemonThread):
    """
    The idea in this class is that it should be usually stopped waiting
    for the next event to be called (paused in a threading.Event.wait).

    When a new handle is added it sets the event so that it processes the handles and
    then keeps on waiting as needed again.

    This is done so that it's a bit more optimized than creating many Timer threads.
    """

    def __init__(self, py_db):
        PyDBDaemonThread.__init__(self, py_db)
        self._event = ThreadingEvent()  # woken when a handle is added or on kill
        self._handles = []  # pending _OnTimeoutHandle objects

        # We could probably do things valid without this lock so that it's possible to add
        # handles while processing, but the implementation would also be harder to follow,
        # so, for now, we're either processing or adding handles, not both at the same time.
        self._lock = ThreadingLock()

    def _on_run(self):
        # Main loop: sleep until the next deadline (or until woken) and process.
        wait_time = None
        while not self._kill_received:
            if _DEBUG:
                if wait_time is None:
                    pydev_log.critical("pydevd_timeout: Wait until a new handle is added.")
                else:
                    pydev_log.critical("pydevd_timeout: Next wait time: %s.", wait_time)
            self._event.wait(wait_time)

            if self._kill_received:
                self._handles = []
                return

            wait_time = self.process_handles()

    def process_handles(self):
        """
        :return int:
            Returns the time we should be waiting for to process the next event properly.
        """
        with self._lock:
            if _DEBUG:
                pydev_log.critical("pydevd_timeout: Processing handles")
            self._event.clear()
            handles = self._handles
            new_handles = self._handles = []  # keep-list rebuilt from scratch

            # Do all the processing based on this time (we want to consider snapshots
            # of processing time -- anything not processed now may be processed at the
            # next snapshot).
            curtime = time.time()

            min_handle_timeout = None

            for handle in handles:
                if curtime < handle.abs_timeout and not handle.disposed:
                    # It still didn't time out.
                    if _DEBUG:
                        pydev_log.critical("pydevd_timeout: Handle NOT processed: %s", handle)
                    new_handles.append(handle)
                    if min_handle_timeout is None:
                        min_handle_timeout = handle.abs_timeout

                    elif handle.abs_timeout < min_handle_timeout:
                        min_handle_timeout = handle.abs_timeout

                else:
                    if _DEBUG:
                        pydev_log.critical("pydevd_timeout: Handle processed: %s", handle)
                    # Timed out (or disposed), so, let's execute it (should be no-op if disposed).
                    handle.exec_on_timeout()

            if min_handle_timeout is None:
                return None
            else:
                timeout = min_handle_timeout - curtime
                if timeout <= 0:
                    pydev_log.critical("pydevd_timeout: Expected timeout to be > 0. Found: %s", timeout)

                return timeout

    def do_kill_pydev_thread(self):
        # Wake the loop so it observes _kill_received and exits.
        PyDBDaemonThread.do_kill_pydev_thread(self)
        with self._lock:
            self._event.set()

    def add_on_timeout_handle(self, handle):
        # Register a handle and wake the loop to recompute the next wait time.
        with self._lock:
            self._handles.append(handle)
            self._event.set()


class _OnTimeoutHandle(object):
    """One scheduled callback; also a context manager that disposes the handle
    (cancels the callback) on exit."""

    def __init__(self, tracker, abs_timeout, on_timeout, kwargs):
        self._str = "_OnTimeoutHandle(%s)" % (on_timeout,)

        self._tracker = weakref.ref(tracker)  # weak: don't keep the tracker alive
        self.abs_timeout = abs_timeout
        self.on_timeout = on_timeout
        if kwargs is None:
            kwargs = {}
        self.kwargs = kwargs
        self.disposed = False

    def exec_on_timeout(self):
        # Note: lock should already be obtained when executing this function.
        kwargs = self.kwargs
        on_timeout = self.on_timeout

        if not self.disposed:
            # Dispose before calling so re-entry is a no-op.
            self.disposed = True
            self.kwargs = None
            self.on_timeout = None

            try:
                if _DEBUG:
                    pydev_log.critical("pydevd_timeout: Calling on timeout: %s with kwargs: %s", on_timeout, kwargs)

                on_timeout(**kwargs)
            except Exception:
                pydev_log.exception("pydevd_timeout: Exception on callback timeout.")

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        tracker = self._tracker()

        if tracker is None:
            # Tracker already collected: nothing to synchronize against.
            lock = NULL
        else:
            lock = tracker._lock

        with lock:
            self.disposed = True
            self.kwargs = None
            self.on_timeout = None

    def __str__(self):
        return self._str

    __repr__ = __str__


class TimeoutTracker(object):
    """
    This is a helper class to track the timeout of something.
    """

    def __init__(self, py_db):
        self._thread = None  # _TimeoutThread, created lazily on first use
        self._lock = ThreadingLock()
        self._py_db = weakref.ref(py_db)

    def call_on_timeout(self, timeout, on_timeout, kwargs=None):
        """
        This can be called regularly to always execute the given function after a given timeout:

        call_on_timeout(py_db, 10, on_timeout)


        Or as a context manager to stop the method from being called if it finishes before the timeout
        elapses:

        with call_on_timeout(py_db, 10, on_timeout):
            ...

        Note: the callback will be called from a PyDBDaemonThread.
        """
        with self._lock:
            if self._thread is None:
                if _DEBUG:
                    pydev_log.critical("pydevd_timeout: Created _TimeoutThread.")

                self._thread = _TimeoutThread(self._py_db())
                self._thread.start()

            curtime = time.time()
            handle = _OnTimeoutHandle(self, curtime + timeout, on_timeout, kwargs)
            if _DEBUG:
                pydev_log.critical("pydevd_timeout: Added handle: %s.", handle)
            self._thread.add_on_timeout_handle(handle)
            return handle


def create_interrupt_this_thread_callback():
    """
    The idea here is returning a callback that when called will generate a KeyboardInterrupt
    in the thread that called this function.

    If this is the main thread, this means that it'll emulate a Ctrl+C (which may stop I/O
    and sleep operations).

    For other threads, this will call PyThreadState_SetAsyncExc to raise
    a KeyboardInterrupt before the next instruction (so, it won't really interrupt I/O or
    sleep operations).

    :return callable:
        Returns a callback that will interrupt the current thread (this may be called
        from an auxiliary thread).
    """
    tid = thread_get_ident()

    if is_current_thread_main_thread():
        main_thread = threading_current_thread()

        def raise_on_this_thread():
            pydev_log.debug("Callback to interrupt main thread.")
            pydevd_utils.interrupt_main_thread(main_thread)

    else:
        # Note: this works in the sense that it can stop some cpu-intensive slow operation,
        # but we can't really interrupt the thread out of some sleep or I/O operation
        # (this will only be raised when Python is about to execute the next instruction).
        def raise_on_this_thread():
            if IS_CPYTHON:
                pydev_log.debug("Interrupt thread: %s", tid)
                ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(KeyboardInterrupt))
            else:
                pydev_log.debug("It is only possible to interrupt non-main threads in CPython.")

    return raise_on_this_thread
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_timeout.py
|
pydevd_timeout.py
|
Python
| 8,644 | 0.95 | 0.21519 | 0.065217 |
vue-tools
| 779 |
2024-06-05T19:49:33.214157
|
GPL-3.0
| false |
9fb3eef21e05a0bb9abf82068e8d13c3
|
"""For debug purpose we are replacing actual builtin property by the debug property\n"""\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\nfrom _pydev_bundle import pydev_log\n\n\n# =======================================================================================================================\n# replace_builtin_property\n# =======================================================================================================================\ndef replace_builtin_property(new_property=None):\n if new_property is None:\n new_property = DebugProperty\n original = property\n try:\n import builtins\n\n builtins.__dict__["property"] = new_property\n except:\n pydev_log.exception() # @Reimport\n return original\n\n\n# =======================================================================================================================\n# DebugProperty\n# =======================================================================================================================\nclass DebugProperty(object):\n """A custom property which allows python property to get\n controlled by the debugger and selectively disable/re-enable\n the tracing.\n """\n\n def __init__(self, fget=None, fset=None, fdel=None, doc=None):\n self.fget = fget\n self.fset = fset\n self.fdel = fdel\n self.__doc__ = doc\n\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n global_debugger = get_global_debugger()\n try:\n if global_debugger is not None and global_debugger.disable_property_getter_trace:\n global_debugger.disable_tracing()\n if self.fget is None:\n raise AttributeError("unreadable attribute")\n return self.fget(obj)\n finally:\n if global_debugger is not None:\n global_debugger.enable_tracing()\n\n def __set__(self, obj, value):\n global_debugger = get_global_debugger()\n try:\n if global_debugger is not None and global_debugger.disable_property_setter_trace:\n global_debugger.disable_tracing()\n if self.fset is None:\n raise AttributeError("can't set 
attribute")\n self.fset(obj, value)\n finally:\n if global_debugger is not None:\n global_debugger.enable_tracing()\n\n def __delete__(self, obj):\n global_debugger = get_global_debugger()\n try:\n if global_debugger is not None and global_debugger.disable_property_deleter_trace:\n global_debugger.disable_tracing()\n if self.fdel is None:\n raise AttributeError("can't delete attribute")\n self.fdel(obj)\n finally:\n if global_debugger is not None:\n global_debugger.enable_tracing()\n\n def getter(self, fget):\n """Overriding getter decorator for the property"""\n self.fget = fget\n return self\n\n def setter(self, fset):\n """Overriding setter decorator for the property"""\n self.fset = fset\n return self\n\n def deleter(self, fdel):\n """Overriding deleter decorator for the property"""\n self.fdel = fdel\n return self\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_traceproperty.py
|
pydevd_traceproperty.py
|
Python
| 3,345 | 0.95 | 0.303371 | 0.077922 |
vue-tools
| 491 |
2024-07-08T00:09:44.571804
|
Apache-2.0
| false |
b55725598f62006df777b1f66da28e47
|
# Defines which version of the trace_dispatch we'll use.
# Should give warning only here if cython is not available but supported.

import os
from _pydevd_bundle.pydevd_constants import USE_CYTHON_FLAG, ENV_TRUE_LOWER_VALUES, ENV_FALSE_LOWER_VALUES
from _pydev_bundle import pydev_log

dirname = os.path.dirname(os.path.dirname(__file__))
USING_CYTHON = False  # flipped to True below when the cython speedups load


def delete_old_compiled_extensions():
    """Best-effort removal of stale compiled cython speedups (*.so files and
    the build/ dir); on failure only logs a warning asking for manual cleanup."""
    import _pydevd_bundle

    cython_extensions_dir = os.path.dirname(os.path.dirname(_pydevd_bundle.__file__))
    _pydevd_bundle_ext_dir = os.path.dirname(_pydevd_bundle.__file__)
    _pydevd_frame_eval_ext_dir = os.path.join(cython_extensions_dir, "_pydevd_frame_eval_ext")
    try:
        import shutil

        for file in os.listdir(_pydevd_bundle_ext_dir):
            if file.startswith("pydevd") and file.endswith(".so"):
                os.remove(os.path.join(_pydevd_bundle_ext_dir, file))
        for file in os.listdir(_pydevd_frame_eval_ext_dir):
            if file.startswith("pydevd") and file.endswith(".so"):
                os.remove(os.path.join(_pydevd_frame_eval_ext_dir, file))
        build_dir = os.path.join(cython_extensions_dir, "build")
        if os.path.exists(build_dir):
            shutil.rmtree(os.path.join(cython_extensions_dir, "build"))
    except OSError:
        pydev_log.error_once(
            "warning: failed to delete old cython speedups. Please delete all *.so files from the directories "
            '"%s" and "%s"' % (_pydevd_bundle_ext_dir, _pydevd_frame_eval_ext_dir)
        )


# Three modes driven by the USE_CYTHON_FLAG env var:
#   true  -> cython is mandatory (import error propagates),
#   false -> pure-python implementation is forced,
#   unset -> try cython, fall back to pure python with a hint to compile.
if USE_CYTHON_FLAG in ENV_TRUE_LOWER_VALUES:
    # We must import the cython version if forcing cython
    from _pydevd_bundle.pydevd_cython_wrapper import (
        trace_dispatch,
        global_cache_skips,
        global_cache_frame_skips,
        fix_top_level_trace_and_get_trace_func,
    )
    from _pydevd_bundle.pydevd_cython_wrapper import should_stop_on_exception, handle_exception, is_unhandled_exception

    USING_CYTHON = True

elif USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES:
    # Use the regular version if not forcing cython
    from _pydevd_bundle.pydevd_trace_dispatch_regular import (
        trace_dispatch,
        global_cache_skips,
        global_cache_frame_skips,
        fix_top_level_trace_and_get_trace_func,
    )  # @UnusedImport
    from .pydevd_frame import should_stop_on_exception, handle_exception, is_unhandled_exception

else:
    # Regular: use fallback if not found and give message to user
    try:
        from _pydevd_bundle.pydevd_cython_wrapper import (
            trace_dispatch,
            global_cache_skips,
            global_cache_frame_skips,
            fix_top_level_trace_and_get_trace_func,
        )
        from _pydevd_bundle.pydevd_cython_wrapper import should_stop_on_exception, handle_exception, is_unhandled_exception

        # This version number is always available
        from _pydevd_bundle.pydevd_additional_thread_info_regular import version as regular_version

        # This version number from the already compiled cython extension
        from _pydevd_bundle.pydevd_cython_wrapper import version as cython_version

        if cython_version != regular_version:
            # delete_old_compiled_extensions() -- would be ok in dev mode but we don't want to erase
            # files from other python versions on release, so, just raise import error here.
            raise ImportError("Cython version of speedups does not match.")
        else:
            USING_CYTHON = True

    except ImportError:
        from _pydevd_bundle.pydevd_trace_dispatch_regular import (
            trace_dispatch,
            global_cache_skips,
            global_cache_frame_skips,
            fix_top_level_trace_and_get_trace_func,
        )  # @UnusedImport
        from .pydevd_frame import should_stop_on_exception, handle_exception, is_unhandled_exception

        pydev_log.show_compile_cython_command_line()
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_trace_dispatch.py
|
pydevd_trace_dispatch.py
|
Python
| 4,009 | 0.95 | 0.152174 | 0.118421 |
react-lib
| 559 |
2023-07-17T22:09:34.526110
|
BSD-3-Clause
| false |
f72689aa51a83a150f4efe6fd92fd652
|
from __future__ import nested_scopes\nimport traceback\nimport warnings\nfrom _pydev_bundle import pydev_log\nfrom _pydev_bundle._pydev_saved_modules import thread, threading\nfrom _pydev_bundle import _pydev_saved_modules\nimport signal\nimport os\nimport ctypes\nfrom importlib import import_module\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom urllib.parse import quote # @UnresolvedImport\nimport time\nimport inspect\nimport sys\nfrom _pydevd_bundle.pydevd_constants import (\n USE_CUSTOM_SYS_CURRENT_FRAMES,\n IS_PYPY,\n SUPPORT_GEVENT,\n GEVENT_SUPPORT_NOT_SET_MSG,\n GENERATED_LEN_ATTR_NAME,\n PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT,\n get_global_debugger,\n)\n\n\ndef save_main_module(file, module_name):\n # patch provided by: Scott Schlesier - when script is run, it does not\n # use globals from pydevd:\n # This will prevent the pydevd script from contaminating the namespace for the script to be debugged\n # pretend pydevd is not the main module, and\n # convince the file to be debugged that it was loaded as main\n m = sys.modules[module_name] = sys.modules["__main__"]\n m.__name__ = module_name\n loader = m.__loader__ if hasattr(m, "__loader__") else None\n spec = spec_from_file_location("__main__", file, loader=loader)\n m = module_from_spec(spec)\n sys.modules["__main__"] = m\n return m\n\n\ndef is_current_thread_main_thread():\n if hasattr(threading, "main_thread"):\n return threading.current_thread() is threading.main_thread()\n else:\n return isinstance(threading.current_thread(), threading._MainThread)\n\n\ndef get_main_thread():\n if hasattr(threading, "main_thread"):\n return threading.main_thread()\n else:\n for t in threading.enumerate():\n if isinstance(t, threading._MainThread):\n return t\n return None\n\n\ndef to_number(x):\n if is_string(x):\n try:\n n = float(x)\n return n\n except ValueError:\n pass\n\n l = x.find("(")\n if l != -1:\n y = x[0 : l - 1]\n # print y\n try:\n n = float(y)\n return n\n except ValueError:\n 
pass\n return None\n\n\ndef compare_object_attrs_key(x):\n if GENERATED_LEN_ATTR_NAME == x:\n as_number = to_number(x)\n if as_number is None:\n as_number = 99999999\n # len() should appear after other attributes in a list.\n return (1, as_number)\n else:\n return (-1, to_string(x))\n\n\ndef is_string(x):\n return isinstance(x, str)\n\n\ndef to_string(x):\n if isinstance(x, str):\n return x\n else:\n return str(x)\n\n\ndef print_exc():\n if traceback:\n traceback.print_exc()\n\n\ndef quote_smart(s, safe="/"):\n return quote(s, safe)\n\n\ndef get_clsname_for_code(code, frame):\n clsname = None\n if len(code.co_varnames) > 0:\n # We are checking the first argument of the function\n # (`self` or `cls` for methods).\n first_arg_name = code.co_varnames[0]\n if first_arg_name in frame.f_locals:\n first_arg_obj = frame.f_locals[first_arg_name]\n if inspect.isclass(first_arg_obj): # class method\n first_arg_class = first_arg_obj\n else: # instance method\n if hasattr(first_arg_obj, "__class__"):\n first_arg_class = first_arg_obj.__class__\n else: # old style class, fall back on type\n first_arg_class = type(first_arg_obj)\n func_name = code.co_name\n if hasattr(first_arg_class, func_name):\n method = getattr(first_arg_class, func_name)\n func_code = None\n if hasattr(method, "func_code"): # Python2\n func_code = method.func_code\n elif hasattr(method, "__code__"): # Python3\n func_code = method.__code__\n if func_code and func_code == code:\n clsname = first_arg_class.__name__\n\n return clsname\n\n\ndef get_non_pydevd_threads():\n threads = threading.enumerate()\n return [t for t in threads if t and not getattr(t, "is_pydev_daemon_thread", False)]\n\n\nif USE_CUSTOM_SYS_CURRENT_FRAMES and IS_PYPY:\n # On PyPy we can use its fake_frames to get the traceback\n # (instead of the actual real frames that need the tracing to be correct).\n _tid_to_frame_for_dump_threads = sys._current_frames\nelse:\n from _pydevd_bundle.pydevd_constants import _current_frames as 
_tid_to_frame_for_dump_threads\n\n\ndef dump_threads(stream=None, show_pydevd_threads=True):\n """\n Helper to dump thread info.\n """\n if stream is None:\n stream = sys.stderr\n thread_id_to_name_and_is_pydevd_thread = {}\n try:\n threading_enumerate = _pydev_saved_modules.pydevd_saved_threading_enumerate\n if threading_enumerate is None:\n threading_enumerate = threading.enumerate\n\n for t in threading_enumerate():\n is_pydevd_thread = getattr(t, "is_pydev_daemon_thread", False)\n thread_id_to_name_and_is_pydevd_thread[t.ident] = (\n "%s (daemon: %s, pydevd thread: %s)" % (t.name, t.daemon, is_pydevd_thread),\n is_pydevd_thread,\n )\n except:\n pass\n\n stream.write("===============================================================================\n")\n stream.write("Threads running\n")\n stream.write("================================= Thread Dump =================================\n")\n stream.flush()\n\n for thread_id, frame in _tid_to_frame_for_dump_threads().items():\n name, is_pydevd_thread = thread_id_to_name_and_is_pydevd_thread.get(thread_id, (thread_id, False))\n if not show_pydevd_threads and is_pydevd_thread:\n continue\n\n stream.write("\n-------------------------------------------------------------------------------\n")\n stream.write(" Thread %s" % (name,))\n stream.write("\n\n")\n\n for i, (filename, lineno, name, line) in enumerate(traceback.extract_stack(frame)):\n stream.write(' File "%s", line %d, in %s\n' % (filename, lineno, name))\n if line:\n stream.write(" %s\n" % (line.strip()))\n\n if i == 0 and "self" in frame.f_locals:\n stream.write(" self: ")\n try:\n stream.write(str(frame.f_locals["self"]))\n except:\n stream.write("Unable to get str of: %s" % (type(frame.f_locals["self"]),))\n stream.write("\n")\n stream.flush()\n\n stream.write("\n=============================== END Thread Dump ===============================")\n stream.flush()\n\n\ndef _extract_variable_nested_braces(char_iter):\n expression = []\n level = 0\n for c in 
char_iter:\n if c == "{":\n level += 1\n if c == "}":\n level -= 1\n if level == -1:\n return "".join(expression).strip()\n expression.append(c)\n raise SyntaxError("Unbalanced braces in expression.")\n\n\ndef _extract_expression_list(log_message):\n # Note: not using re because of nested braces.\n expression = []\n expression_vars = []\n char_iter = iter(log_message)\n for c in char_iter:\n if c == "{":\n expression_var = _extract_variable_nested_braces(char_iter)\n if expression_var:\n expression.append("%s")\n expression_vars.append(expression_var)\n else:\n expression.append(c)\n\n expression = "".join(expression)\n return expression, expression_vars\n\n\ndef convert_dap_log_message_to_expression(log_message):\n try:\n expression, expression_vars = _extract_expression_list(log_message)\n except SyntaxError:\n return repr("Unbalanced braces in: %s" % (log_message))\n if not expression_vars:\n return repr(expression)\n # Note: use '%' to be compatible with Python 2.6.\n return repr(expression) + " % (" + ", ".join(str(x) for x in expression_vars) + ",)"\n\n\ndef notify_about_gevent_if_needed(stream=None):\n """\n When debugging with gevent check that the gevent flag is used if the user uses the gevent\n monkey-patching.\n\n :return bool:\n Returns True if a message had to be shown to the user and False otherwise.\n """\n stream = stream if stream is not None else sys.stderr\n if not SUPPORT_GEVENT:\n gevent_monkey = sys.modules.get("gevent.monkey")\n if gevent_monkey is not None:\n try:\n saved = gevent_monkey.saved\n except AttributeError:\n pydev_log.exception_once("Error checking for gevent monkey-patching.")\n return False\n\n if saved:\n # Note: print to stderr as it may deadlock the debugger.\n sys.stderr.write("%s\n" % (GEVENT_SUPPORT_NOT_SET_MSG,))\n return True\n\n return False\n\n\ndef hasattr_checked(obj, name):\n try:\n getattr(obj, name)\n except:\n # i.e.: Handle any exception, not only AttributeError.\n return False\n else:\n return True\n\n\ndef 
getattr_checked(obj, name):\n try:\n return getattr(obj, name)\n except:\n # i.e.: Handle any exception, not only AttributeError.\n return None\n\n\ndef dir_checked(obj):\n try:\n return dir(obj)\n except:\n return []\n\n\ndef isinstance_checked(obj, cls):\n try:\n return isinstance(obj, cls)\n except:\n return False\n\n\nclass ScopeRequest(object):\n __slots__ = ["variable_reference", "scope"]\n\n def __init__(self, variable_reference, scope):\n assert scope in ("globals", "locals")\n self.variable_reference = variable_reference\n self.scope = scope\n\n def __eq__(self, o):\n if isinstance(o, ScopeRequest):\n return self.variable_reference == o.variable_reference and self.scope == o.scope\n\n return False\n\n def __ne__(self, o):\n return not self == o\n\n def __hash__(self):\n return hash((self.variable_reference, self.scope))\n\n\nclass DAPGrouper(object):\n """\n Note: this is a helper class to group variables on the debug adapter protocol (DAP). For\n the xml protocol the type is just added to each variable and the UI can group/hide it as needed.\n """\n\n SCOPE_SPECIAL_VARS = "special variables"\n SCOPE_PROTECTED_VARS = "protected variables"\n SCOPE_FUNCTION_VARS = "function variables"\n SCOPE_CLASS_VARS = "class variables"\n\n SCOPES_SORTED = [\n SCOPE_SPECIAL_VARS,\n SCOPE_PROTECTED_VARS,\n SCOPE_FUNCTION_VARS,\n SCOPE_CLASS_VARS,\n ]\n\n __slots__ = ["variable_reference", "scope", "contents_debug_adapter_protocol"]\n\n def __init__(self, scope):\n self.variable_reference = id(self)\n self.scope = scope\n self.contents_debug_adapter_protocol = []\n\n def get_contents_debug_adapter_protocol(self):\n return self.contents_debug_adapter_protocol[:]\n\n def __eq__(self, o):\n if isinstance(o, ScopeRequest):\n return self.variable_reference == o.variable_reference and self.scope == o.scope\n\n return False\n\n def __ne__(self, o):\n return not self == o\n\n def __hash__(self):\n return hash((self.variable_reference, self.scope))\n\n def __repr__(self):\n return 
""\n\n def __str__(self):\n return ""\n\n\ndef interrupt_main_thread(main_thread=None):\n """\n Generates a KeyboardInterrupt in the main thread by sending a Ctrl+C\n or by calling thread.interrupt_main().\n\n :param main_thread:\n Needed because Jython needs main_thread._thread.interrupt() to be called.\n\n Note: if unable to send a Ctrl+C, the KeyboardInterrupt will only be raised\n when the next Python instruction is about to be executed (so, it won't interrupt\n a sleep(1000)).\n """\n if main_thread is None:\n main_thread = threading.main_thread()\n\n pydev_log.debug("Interrupt main thread.")\n called = False\n try:\n if os.name == "posix":\n # On Linux we can't interrupt 0 as in Windows because it's\n # actually owned by a process -- on the good side, signals\n # work much better on Linux!\n os.kill(os.getpid(), signal.SIGINT)\n called = True\n\n elif os.name == "nt":\n # This generates a Ctrl+C only for the current process and not\n # to the process group!\n # Note: there doesn't seem to be any public documentation for this\n # function (although it seems to be present from Windows Server 2003 SP1 onwards\n # according to: https://www.geoffchappell.com/studies/windows/win32/kernel32/api/index.htm)\n ctypes.windll.kernel32.CtrlRoutine(0)\n\n # The code below is deprecated because it actually sends a Ctrl+C\n # to the process group, so, if this was a process created without\n # passing `CREATE_NEW_PROCESS_GROUP` the signal may be sent to the\n # parent process and to sub-processes too (which is not ideal --\n # for instance, when using pytest-xdist, it'll actually stop the\n # testing, even when called in the subprocess).\n\n # if hasattr_checked(signal, 'CTRL_C_EVENT'):\n # os.kill(0, signal.CTRL_C_EVENT)\n # else:\n # # Python 2.6\n # ctypes.windll.kernel32.GenerateConsoleCtrlEvent(0, 0)\n called = True\n\n except:\n # If something went wrong, fallback to interrupting when the next\n # Python instruction is being called.\n pydev_log.exception("Error 
interrupting main thread (using fallback).")\n\n if not called:\n try:\n # In this case, we don't really interrupt a sleep() nor IO operations\n # (this makes the KeyboardInterrupt be sent only when the next Python\n # instruction is about to be executed).\n if hasattr(thread, "interrupt_main"):\n thread.interrupt_main()\n else:\n main_thread._thread.interrupt() # Jython\n except:\n pydev_log.exception("Error on interrupt main thread fallback.")\n\n\nclass Timer(object):\n def __init__(self, min_diff=PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT):\n self.min_diff = min_diff\n self._curr_time = time.time()\n\n def print_time(self, msg="Elapsed:"):\n old = self._curr_time\n new = self._curr_time = time.time()\n diff = new - old\n if diff >= self.min_diff:\n print("%s: %.2fs" % (msg, diff))\n\n def _report_slow(self, compute_msg, *args):\n old = self._curr_time\n new = self._curr_time = time.time()\n diff = new - old\n if diff >= self.min_diff:\n py_db = get_global_debugger()\n if py_db is not None:\n msg = compute_msg(diff, *args)\n py_db.writer.add_command(py_db.cmd_factory.make_warning_message(msg))\n\n def report_if_compute_repr_attr_slow(self, attrs_tab_separated, attr_name, attr_type):\n self._report_slow(self._compute_repr_slow, attrs_tab_separated, attr_name, attr_type)\n\n def _compute_repr_slow(self, diff, attrs_tab_separated, attr_name, attr_type):\n try:\n attr_type = attr_type.__name__\n except:\n pass\n if attrs_tab_separated:\n return (\n "pydevd warning: Computing repr of %s.%s (%s) was slow (took %.2fs).\n"\n "Customize report timeout by setting the `PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT` environment variable to a higher timeout (default is: %ss)\n"\n ) % (attrs_tab_separated.replace("\t", "."), attr_name, attr_type, diff, PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT)\n else:\n return (\n "pydevd warning: Computing repr of %s (%s) was slow (took %.2fs)\n"\n "Customize report timeout by setting the `PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT` environment variable to a higher timeout (default 
is: %ss)\n"\n ) % (attr_name, attr_type, diff, PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT)\n\n def report_if_getting_attr_slow(self, cls, attr_name):\n self._report_slow(self._compute_get_attr_slow, cls, attr_name)\n\n def _compute_get_attr_slow(self, diff, cls, attr_name):\n try:\n cls = cls.__name__\n except:\n pass\n return (\n "pydevd warning: Getting attribute %s.%s was slow (took %.2fs)\n"\n "Customize report timeout by setting the `PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT` environment variable to a higher timeout (default is: %ss)\n"\n ) % (cls, attr_name, diff, PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT)\n\n\ndef import_attr_from_module(import_with_attr_access):\n if "." not in import_with_attr_access:\n # We need at least one '.' (we don't support just the module import, we need the attribute access too).\n raise ImportError("Unable to import module with attr access: %s" % (import_with_attr_access,))\n\n module_name, attr_name = import_with_attr_access.rsplit(".", 1)\n\n while True:\n try:\n mod = import_module(module_name)\n except ImportError:\n if "." not in module_name:\n raise ImportError("Unable to import module with attr access: %s" % (import_with_attr_access,))\n\n module_name, new_attr_part = module_name.rsplit(".", 1)\n attr_name = new_attr_part + "." + attr_name\n else:\n # Ok, we got the base module, now, get the attribute we need.\n try:\n for attr in attr_name.split("."):\n mod = getattr(mod, attr)\n return mod\n except:\n raise ImportError("Unable to import module with attr access: %s" % (import_with_attr_access,))\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_utils.py
|
pydevd_utils.py
|
Python
| 18,336 | 0.95 | 0.257253 | 0.100239 |
node-utils
| 773 |
2024-11-24T09:28:45.314131
|
Apache-2.0
| false |
952bdea8abadf47459cd11bfde99677c
|
import sys\n\n\n# =======================================================================================================================\n# PydevdVmType\n# =======================================================================================================================\nclass PydevdVmType:\n PYTHON = "python"\n JYTHON = "jython"\n vm_type = None\n\n\n# =======================================================================================================================\n# set_vm_type\n# =======================================================================================================================\ndef set_vm_type(vm_type):\n PydevdVmType.vm_type = vm_type\n\n\n# =======================================================================================================================\n# get_vm_type\n# =======================================================================================================================\ndef get_vm_type():\n if PydevdVmType.vm_type is None:\n setup_type()\n return PydevdVmType.vm_type\n\n\n# =======================================================================================================================\n# setup_type\n# =======================================================================================================================\ndef setup_type(str=None):\n if str is not None:\n PydevdVmType.vm_type = str\n return\n\n if sys.platform.startswith("java"):\n PydevdVmType.vm_type = PydevdVmType.JYTHON\n else:\n PydevdVmType.vm_type = PydevdVmType.PYTHON\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_vm_type.py
|
pydevd_vm_type.py
|
Python
| 1,597 | 0.95 | 0.175 | 0.387097 |
python-kit
| 104 |
2024-06-19T11:35:11.834388
|
MIT
| false |
b1ae143ead52d2a6735247c4ded1bac8
|
from _pydev_bundle import pydev_log\nfrom _pydevd_bundle import pydevd_extension_utils\nfrom _pydevd_bundle import pydevd_resolver\nimport sys\nfrom _pydevd_bundle.pydevd_constants import (\n BUILTINS_MODULE_NAME,\n MAXIMUM_VARIABLE_REPRESENTATION_SIZE,\n RETURN_VALUES_DICT,\n LOAD_VALUES_ASYNC,\n DEFAULT_VALUE,\n)\nfrom _pydev_bundle.pydev_imports import quote\nfrom _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider\nfrom _pydevd_bundle.pydevd_utils import isinstance_checked, hasattr_checked, DAPGrouper\nfrom _pydevd_bundle.pydevd_resolver import get_var_scope, MoreItems, MoreItemsRange\nfrom typing import Optional\n\ntry:\n import types\n\n frame_type = types.FrameType\nexcept:\n frame_type = None\n\n\ndef make_valid_xml_value(s):\n # Same thing as xml.sax.saxutils.escape but also escaping double quotes.\n return s.replace("&", "&").replace("<", "<").replace(">", ">").replace('"', """)\n\n\nclass ExceptionOnEvaluate:\n def __init__(self, result, etype, tb):\n self.result = result\n self.etype = etype\n self.tb = tb\n\n\n_IS_JYTHON = sys.platform.startswith("java")\n\n\ndef _create_default_type_map():\n default_type_map = [\n # None means that it should not be treated as a compound variable\n # isintance does not accept a tuple on some versions of python, so, we must declare it expanded\n (\n type(None),\n None,\n ),\n (int, None),\n (float, None),\n (complex, None),\n (str, None),\n (tuple, pydevd_resolver.tupleResolver),\n (list, pydevd_resolver.tupleResolver),\n (dict, pydevd_resolver.dictResolver),\n ]\n try:\n from collections import OrderedDict\n\n default_type_map.insert(0, (OrderedDict, pydevd_resolver.orderedDictResolver))\n # we should put it before dict\n except:\n pass\n\n try:\n default_type_map.append((long, None)) # @UndefinedVariable\n except:\n pass # not available on all python versions\n\n default_type_map.append((DAPGrouper, pydevd_resolver.dapGrouperResolver))\n default_type_map.append((MoreItems, 
pydevd_resolver.forwardInternalResolverToObject))\n default_type_map.append((MoreItemsRange, pydevd_resolver.forwardInternalResolverToObject))\n\n try:\n default_type_map.append((set, pydevd_resolver.setResolver))\n except:\n pass # not available on all python versions\n\n try:\n default_type_map.append((frozenset, pydevd_resolver.setResolver))\n except:\n pass # not available on all python versions\n\n try:\n from django.utils.datastructures import MultiValueDict\n\n default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))\n # we should put it before dict\n except:\n pass # django may not be installed\n\n try:\n from django.forms import BaseForm\n\n default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))\n # we should put it before instance resolver\n except:\n pass # django may not be installed\n\n try:\n from collections import deque\n\n default_type_map.append((deque, pydevd_resolver.dequeResolver))\n except:\n pass\n\n try:\n from ctypes import Array\n\n default_type_map.append((Array, pydevd_resolver.tupleResolver))\n except:\n pass\n\n if frame_type is not None:\n default_type_map.append((frame_type, pydevd_resolver.frameResolver))\n\n if _IS_JYTHON:\n from org.python import core # @UnresolvedImport\n\n default_type_map.append((core.PyNone, None))\n default_type_map.append((core.PyInteger, None))\n default_type_map.append((core.PyLong, None))\n default_type_map.append((core.PyFloat, None))\n default_type_map.append((core.PyComplex, None))\n default_type_map.append((core.PyString, None))\n default_type_map.append((core.PyTuple, pydevd_resolver.tupleResolver))\n default_type_map.append((core.PyList, pydevd_resolver.tupleResolver))\n default_type_map.append((core.PyDictionary, pydevd_resolver.dictResolver))\n default_type_map.append((core.PyStringMap, pydevd_resolver.dictResolver))\n\n if hasattr(core, "PyJavaInstance"):\n # Jython 2.5b3 removed it.\n default_type_map.append((core.PyJavaInstance, 
pydevd_resolver.instanceResolver))\n\n return default_type_map\n\n\nclass TypeResolveHandler(object):\n NO_PROVIDER = [] # Sentinel value (any mutable object to be used as a constant would be valid).\n\n def __init__(self):\n # Note: don't initialize with the types we already know about so that the extensions can override\n # the default resolvers that are already available if they want.\n self._type_to_resolver_cache = {}\n self._type_to_str_provider_cache = {}\n self._initialized = False\n\n def _initialize(self):\n self._default_type_map = _create_default_type_map()\n self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)\n self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)\n self._initialized = True\n\n def get_type(self, o):\n try:\n try:\n # Faster than type(o) as we don't need the function call.\n type_object = o.__class__ # could fail here\n type_name = type_object.__name__\n return self._get_type(o, type_object, type_name) # could fail here\n except:\n # Not all objects have __class__ (i.e.: there are bad bindings around).\n type_object = type(o)\n type_name = type_object.__name__\n\n try:\n return self._get_type(o, type_object, type_name)\n except:\n if isinstance(type_object, type):\n # If it's still something manageable, use the default resolver, otherwise\n # fallback to saying that it wasn't possible to get any info on it.\n return type_object, str(type_name), pydevd_resolver.defaultResolver\n\n return "Unable to get Type", "Unable to get Type", None\n except:\n # This happens for org.python.core.InitModule\n return "Unable to get Type", "Unable to get Type", None\n\n def _get_type(self, o, type_object, type_name):\n # Note: we could have an exception here if the type_object is not hashable...\n resolver = self._type_to_resolver_cache.get(type_object)\n if resolver is not None:\n return type_object, type_name, resolver\n\n if not self._initialized:\n self._initialize()\n\n try:\n 
for resolver in self._resolve_providers:\n if resolver.can_provide(type_object, type_name):\n # Cache it\n self._type_to_resolver_cache[type_object] = resolver\n return type_object, type_name, resolver\n\n for t in self._default_type_map:\n if isinstance_checked(o, t[0]):\n # Cache it\n resolver = t[1]\n self._type_to_resolver_cache[type_object] = resolver\n return (type_object, type_name, resolver)\n except:\n pydev_log.exception()\n\n # No match return default (and cache it).\n resolver = pydevd_resolver.defaultResolver\n self._type_to_resolver_cache[type_object] = resolver\n return type_object, type_name, resolver\n\n if _IS_JYTHON:\n _base_get_type = _get_type\n\n def _get_type(self, o, type_object, type_name):\n if type_name == "org.python.core.PyJavaInstance":\n return type_object, type_name, pydevd_resolver.instanceResolver\n\n if type_name == "org.python.core.PyArray":\n return type_object, type_name, pydevd_resolver.jyArrayResolver\n\n return self._base_get_type(o, type_object, type_name)\n\n def _get_str_from_provider(self, provider, o, context: Optional[str] = None):\n if context is not None:\n get_str_in_context = getattr(provider, "get_str_in_context", None)\n if get_str_in_context is not None:\n return get_str_in_context(o, context)\n\n return provider.get_str(o)\n\n def str_from_providers(self, o, type_object, type_name, context: Optional[str] = None):\n provider = self._type_to_str_provider_cache.get(type_object)\n\n if provider is self.NO_PROVIDER:\n return None\n\n if provider is not None:\n return self._get_str_from_provider(provider, o, context)\n\n if not self._initialized:\n self._initialize()\n\n for provider in self._str_providers:\n if provider.can_provide(type_object, type_name):\n self._type_to_str_provider_cache[type_object] = provider\n try:\n return self._get_str_from_provider(provider, o, context)\n except:\n pydev_log.exception("Error when getting str with custom provider: %s." 
% (provider,))\n\n self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER\n return None\n\n\n_TYPE_RESOLVE_HANDLER = TypeResolveHandler()\n\n"""\ndef get_type(o):\n Receives object and returns a triple (type_object, type_string, resolver).\n\n resolver != None means that variable is a container, and should be displayed as a hierarchy.\n\n Use the resolver to get its attributes.\n\n All container objects (i.e.: dict, list, tuple, object, etc) should have a resolver.\n"""\nget_type = _TYPE_RESOLVE_HANDLER.get_type\n\n_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers\n\n\ndef is_builtin(x):\n return getattr(x, "__module__", None) == BUILTINS_MODULE_NAME\n\n\ndef should_evaluate_full_value(val):\n return not LOAD_VALUES_ASYNC or (is_builtin(type(val)) and not isinstance_checked(val, (list, tuple, dict)))\n\n\ndef return_values_from_dict_to_xml(return_dict):\n res = []\n for name, val in return_dict.items():\n res.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"'))\n return "".join(res)\n\n\ndef frame_vars_to_xml(frame_f_locals, hidden_ns=None):\n """dumps frame variables to XML\n <var name="var_name" scope="local" type="type" value="value"/>\n """\n xml = []\n\n keys = sorted(frame_f_locals)\n\n return_values_xml = []\n\n for k in keys:\n try:\n v = frame_f_locals[k]\n eval_full_val = should_evaluate_full_value(v)\n\n if k == "_pydev_stop_at_break":\n continue\n\n if k == RETURN_VALUES_DICT:\n for name, val in v.items():\n return_values_xml.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"'))\n\n else:\n if hidden_ns is not None and k in hidden_ns:\n xml.append(var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"', evaluate_full_value=eval_full_val))\n else:\n xml.append(var_to_xml(v, str(k), evaluate_full_value=eval_full_val))\n except Exception:\n pydev_log.exception("Unexpected error, recovered safely.")\n\n # Show return values as the first entry.\n return_values_xml.extend(xml)\n return 
"".join(return_values_xml)\n\n\ndef get_variable_details(val, evaluate_full_value=True, to_string=None, context: Optional[str] = None):\n """\n :param context:\n This is the context in which the variable is being requested. Valid values:\n "watch",\n "repl",\n "hover",\n "clipboard"\n """\n try:\n # This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).\n is_exception_on_eval = val.__class__ == ExceptionOnEvaluate\n except:\n is_exception_on_eval = False\n\n if is_exception_on_eval:\n v = val.result\n else:\n v = val\n\n _type, type_name, resolver = get_type(v)\n type_qualifier = getattr(_type, "__module__", "")\n if not evaluate_full_value:\n value = DEFAULT_VALUE\n else:\n try:\n str_from_provider = _str_from_providers(v, _type, type_name, context)\n if str_from_provider is not None:\n value = str_from_provider\n\n elif to_string is not None:\n value = to_string(v)\n\n elif hasattr_checked(v, "__class__"):\n if v.__class__ == frame_type:\n value = pydevd_resolver.frameResolver.get_frame_name(v)\n\n elif v.__class__ in (list, tuple):\n if len(v) > 300:\n value = "%s: %s" % (str(v.__class__), "<Too big to print. Len: %s>" % (len(v),))\n else:\n value = "%s: %s" % (str(v.__class__), v)\n else:\n try:\n cName = str(v.__class__)\n if cName.find(".") != -1:\n cName = cName.split(".")[-1]\n\n elif cName.find("'") != -1: # does not have '.' 
(could be something like <type 'int'>)\n cName = cName[cName.index("'") + 1 :]\n\n if cName.endswith("'>"):\n cName = cName[:-2]\n except:\n cName = str(v.__class__)\n\n value = "%s: %s" % (cName, v)\n else:\n value = str(v)\n except:\n try:\n value = repr(v)\n except:\n value = "Unable to get repr for %s" % v.__class__\n\n # fix to work with unicode values\n try:\n if value.__class__ == bytes:\n value = value.decode("utf-8", "replace")\n except TypeError:\n pass\n\n return type_name, type_qualifier, is_exception_on_eval, resolver, value\n\n\ndef var_to_xml(val, name, trim_if_too_big=True, additional_in_xml="", evaluate_full_value=True):\n """single variable or dictionary to xml representation"""\n\n type_name, type_qualifier, is_exception_on_eval, resolver, value = get_variable_details(val, evaluate_full_value)\n\n scope = get_var_scope(name, val, "", True)\n try:\n name = quote(name, "/>_= ") # TODO: Fix PY-5834 without using quote\n except:\n pass\n\n xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_valid_xml_value(type_name))\n\n if type_qualifier:\n xml_qualifier = 'qualifier="%s"' % make_valid_xml_value(type_qualifier)\n else:\n xml_qualifier = ""\n\n if value:\n # cannot be too big... communication may not handle it.\n if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and trim_if_too_big:\n value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]\n value += "..."\n\n xml_value = ' value="%s"' % (make_valid_xml_value(quote(value, "/>_= ")))\n else:\n xml_value = ""\n\n if is_exception_on_eval:\n xml_container = ' isErrorOnEval="True"'\n else:\n if resolver is not None:\n xml_container = ' isContainer="True"'\n else:\n xml_container = ""\n\n if scope:\n return "".join((xml, xml_qualifier, xml_value, xml_container, additional_in_xml, ' scope="', scope, '"', " />\n"))\n else:\n return "".join((xml, xml_qualifier, xml_value, xml_container, additional_in_xml, " />\n"))\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_xml.py
|
pydevd_xml.py
|
Python
| 15,933 | 0.95 | 0.195853 | 0.065089 |
python-kit
| 294 |
2024-06-28T18:24:49.300751
|
Apache-2.0
| false |
fbf3bf1c54a90993989aaf0fb3bd7bbf
|
import time\n\nfrom _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding\nfrom _pydev_bundle._pydev_saved_modules import threading\nfrom _pydevd_bundle import pydevd_xml\nfrom _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder\nfrom _pydevd_bundle.pydevd_constants import get_thread_id\nfrom _pydevd_bundle.pydevd_net_command import NetCommand\nfrom _pydevd_bundle.pydevd_concurrency_analyser.pydevd_thread_wrappers import ObjectWrapper, wrap_attr\nimport pydevd_file_utils\nfrom _pydev_bundle import pydev_log\nimport sys\n\nfile_system_encoding = getfilesystemencoding()\n\nfrom urllib.parse import quote\n\nthreadingCurrentThread = threading.current_thread\n\nDONT_TRACE_THREADING = ["threading.py", "pydevd.py"]\nINNER_METHODS = ["_stop"]\nINNER_FILES = ["threading.py"]\nTHREAD_METHODS = ["start", "_stop", "join"]\nLOCK_METHODS = ["__init__", "acquire", "release", "__enter__", "__exit__"]\nQUEUE_METHODS = ["put", "get"]\n\n# return time since epoch in milliseconds\ncur_time = lambda: int(round(time.time() * 1000000))\n\n\ndef get_text_list_for_frame(frame):\n # partial copy-paste from make_thread_suspend_str\n curFrame = frame\n cmdTextList = []\n try:\n while curFrame:\n # print cmdText\n myId = str(id(curFrame))\n # print "id is ", myId\n\n if curFrame.f_code is None:\n break # Iron Python sometimes does not have it!\n\n myName = curFrame.f_code.co_name # method name (if in method) or ? 
if global\n if myName is None:\n break # Iron Python sometimes does not have it!\n\n # print "name is ", myName\n\n absolute_filename = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curFrame)[0]\n\n my_file, _applied_mapping = pydevd_file_utils.map_file_to_client(absolute_filename)\n\n # print "file is ", my_file\n # my_file = inspect.getsourcefile(curFrame) or inspect.getfile(frame)\n\n myLine = str(curFrame.f_lineno)\n # print "line is ", myLine\n\n # the variables are all gotten 'on-demand'\n # variables = pydevd_xml.frame_vars_to_xml(curFrame.f_locals)\n\n variables = ""\n cmdTextList.append('<frame id="%s" name="%s" ' % (myId, pydevd_xml.make_valid_xml_value(myName)))\n cmdTextList.append('file="%s" line="%s">' % (quote(my_file, "/>_= \t"), myLine))\n cmdTextList.append(variables)\n cmdTextList.append("</frame>")\n curFrame = curFrame.f_back\n except:\n pydev_log.exception()\n\n return cmdTextList\n\n\ndef send_concurrency_message(event_class, time, name, thread_id, type, event, file, line, frame, lock_id=0, parent=None):\n dbg = GlobalDebuggerHolder.global_dbg\n if dbg is None:\n return\n cmdTextList = ["<xml>"]\n\n cmdTextList.append("<" + event_class)\n cmdTextList.append(' time="%s"' % pydevd_xml.make_valid_xml_value(str(time)))\n cmdTextList.append(' name="%s"' % pydevd_xml.make_valid_xml_value(name))\n cmdTextList.append(' thread_id="%s"' % pydevd_xml.make_valid_xml_value(thread_id))\n cmdTextList.append(' type="%s"' % pydevd_xml.make_valid_xml_value(type))\n if type == "lock":\n cmdTextList.append(' lock_id="%s"' % pydevd_xml.make_valid_xml_value(str(lock_id)))\n if parent is not None:\n cmdTextList.append(' parent="%s"' % pydevd_xml.make_valid_xml_value(parent))\n cmdTextList.append(' event="%s"' % pydevd_xml.make_valid_xml_value(event))\n cmdTextList.append(' file="%s"' % pydevd_xml.make_valid_xml_value(file))\n cmdTextList.append(' line="%s"' % pydevd_xml.make_valid_xml_value(str(line)))\n cmdTextList.append("></" + event_class + 
">")\n\n cmdTextList += get_text_list_for_frame(frame)\n cmdTextList.append("</xml>")\n\n text = "".join(cmdTextList)\n if dbg.writer is not None:\n dbg.writer.add_command(NetCommand(145, 0, text))\n\n\ndef log_new_thread(global_debugger, t):\n event_time = cur_time() - global_debugger.thread_analyser.start_time\n send_concurrency_message(\n "threading_event", event_time, t.name, get_thread_id(t), "thread", "start", "code_name", 0, None, parent=get_thread_id(t)\n )\n\n\nclass ThreadingLogger:\n def __init__(self):\n self.start_time = cur_time()\n\n def set_start_time(self, time):\n self.start_time = time\n\n def log_event(self, frame):\n write_log = False\n self_obj = None\n if "self" in frame.f_locals:\n self_obj = frame.f_locals["self"]\n if isinstance(self_obj, threading.Thread) or self_obj.__class__ == ObjectWrapper:\n write_log = True\n if hasattr(frame, "f_back") and frame.f_back is not None:\n back = frame.f_back\n if hasattr(back, "f_back") and back.f_back is not None:\n back = back.f_back\n if "self" in back.f_locals:\n if isinstance(back.f_locals["self"], threading.Thread):\n write_log = True\n try:\n if write_log:\n t = threadingCurrentThread()\n back = frame.f_back\n if not back:\n return\n name, _, back_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(back)\n event_time = cur_time() - self.start_time\n method_name = frame.f_code.co_name\n\n if isinstance(self_obj, threading.Thread):\n if not hasattr(self_obj, "_pydev_run_patched"):\n wrap_attr(self_obj, "run")\n if (method_name in THREAD_METHODS) and (\n back_base not in DONT_TRACE_THREADING or (method_name in INNER_METHODS and back_base in INNER_FILES)\n ):\n thread_id = get_thread_id(self_obj)\n name = self_obj.getName()\n real_method = frame.f_code.co_name\n parent = None\n if real_method == "_stop":\n if back_base in INNER_FILES and back.f_code.co_name == "_wait_for_tstate_lock":\n back = back.f_back.f_back\n real_method = "stop"\n if hasattr(self_obj, "_pydev_join_called"):\n 
parent = get_thread_id(t)\n elif real_method == "join":\n # join called in the current thread, not in self object\n if not self_obj.is_alive():\n return\n thread_id = get_thread_id(t)\n name = t.name\n self_obj._pydev_join_called = True\n\n if real_method == "start":\n parent = get_thread_id(t)\n send_concurrency_message(\n "threading_event",\n event_time,\n name,\n thread_id,\n "thread",\n real_method,\n back.f_code.co_filename,\n back.f_lineno,\n back,\n parent=parent,\n )\n # print(event_time, self_obj.getName(), thread_id, "thread",\n # real_method, back.f_code.co_filename, back.f_lineno)\n\n if method_name == "pydev_after_run_call":\n if hasattr(frame, "f_back") and frame.f_back is not None:\n back = frame.f_back\n if hasattr(back, "f_back") and back.f_back is not None:\n back = back.f_back\n if "self" in back.f_locals:\n if isinstance(back.f_locals["self"], threading.Thread):\n my_self_obj = frame.f_back.f_back.f_locals["self"]\n my_back = frame.f_back.f_back\n my_thread_id = get_thread_id(my_self_obj)\n send_massage = True\n if hasattr(my_self_obj, "_pydev_join_called"):\n send_massage = False\n # we can't detect stop after join in Python 2 yet\n if send_massage:\n send_concurrency_message(\n "threading_event",\n event_time,\n "Thread",\n my_thread_id,\n "thread",\n "stop",\n my_back.f_code.co_filename,\n my_back.f_lineno,\n my_back,\n parent=None,\n )\n\n if self_obj.__class__ == ObjectWrapper:\n if back_base in DONT_TRACE_THREADING:\n # do not trace methods called from threading\n return\n back_back_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(back.f_back)[2]\n back = back.f_back\n if back_back_base in DONT_TRACE_THREADING:\n # back_back_base is the file, where the method was called froms\n return\n if method_name == "__init__":\n send_concurrency_message(\n "threading_event",\n event_time,\n t.name,\n get_thread_id(t),\n "lock",\n method_name,\n back.f_code.co_filename,\n back.f_lineno,\n back,\n 
lock_id=str(id(frame.f_locals["self"])),\n )\n if "attr" in frame.f_locals and (frame.f_locals["attr"] in LOCK_METHODS or frame.f_locals["attr"] in QUEUE_METHODS):\n real_method = frame.f_locals["attr"]\n if method_name == "call_begin":\n real_method += "_begin"\n elif method_name == "call_end":\n real_method += "_end"\n else:\n return\n if real_method == "release_end":\n # do not log release end. Maybe use it later\n return\n send_concurrency_message(\n "threading_event",\n event_time,\n t.name,\n get_thread_id(t),\n "lock",\n real_method,\n back.f_code.co_filename,\n back.f_lineno,\n back,\n lock_id=str(id(self_obj)),\n )\n\n if real_method in ("put_end", "get_end"):\n # fake release for queue, cause we don't call it directly\n send_concurrency_message(\n "threading_event",\n event_time,\n t.name,\n get_thread_id(t),\n "lock",\n "release",\n back.f_code.co_filename,\n back.f_lineno,\n back,\n lock_id=str(id(self_obj)),\n )\n # print(event_time, t.name, get_thread_id(t), "lock",\n # real_method, back.f_code.co_filename, back.f_lineno)\n\n except Exception:\n pydev_log.exception()\n\n\nclass NameManager:\n def __init__(self, name_prefix):\n self.tasks = {}\n self.last = 0\n self.prefix = name_prefix\n\n def get(self, id):\n if id not in self.tasks:\n self.last += 1\n self.tasks[id] = self.prefix + "-" + str(self.last)\n return self.tasks[id]\n\n\nclass AsyncioLogger:\n def __init__(self):\n self.task_mgr = NameManager("Task")\n self.coro_mgr = NameManager("Coro")\n self.start_time = cur_time()\n\n def get_task_id(self, frame):\n asyncio = sys.modules.get("asyncio")\n if asyncio is None:\n # If asyncio was not imported, there's nothing to be done\n # (also fixes issue where multiprocessing is imported due\n # to asyncio).\n return None\n while frame is not None:\n if "self" in frame.f_locals:\n self_obj = frame.f_locals["self"]\n if isinstance(self_obj, asyncio.Task):\n method_name = frame.f_code.co_name\n if method_name == "_step":\n return id(self_obj)\n frame = 
frame.f_back\n return None\n\n def log_event(self, frame):\n event_time = cur_time() - self.start_time\n\n # Debug loop iterations\n # if isinstance(self_obj, asyncio.base_events.BaseEventLoop):\n # if method_name == "_run_once":\n # print("Loop iteration")\n\n if not hasattr(frame, "f_back") or frame.f_back is None:\n return\n\n asyncio = sys.modules.get("asyncio")\n if asyncio is None:\n # If asyncio was not imported, there's nothing to be done\n # (also fixes issue where multiprocessing is imported due\n # to asyncio).\n return\n\n back = frame.f_back\n\n if "self" in frame.f_locals:\n self_obj = frame.f_locals["self"]\n if isinstance(self_obj, asyncio.Task):\n method_name = frame.f_code.co_name\n if method_name == "set_result":\n task_id = id(self_obj)\n task_name = self.task_mgr.get(str(task_id))\n send_concurrency_message(\n "asyncio_event", event_time, task_name, task_name, "thread", "stop", frame.f_code.co_filename, frame.f_lineno, frame\n )\n\n method_name = back.f_code.co_name\n if method_name == "__init__":\n task_id = id(self_obj)\n task_name = self.task_mgr.get(str(task_id))\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "thread",\n "start",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n )\n\n method_name = frame.f_code.co_name\n if isinstance(self_obj, asyncio.Lock):\n if method_name in ("acquire", "release"):\n task_id = self.get_task_id(frame)\n task_name = self.task_mgr.get(str(task_id))\n\n if method_name == "acquire":\n if not self_obj._waiters and not self_obj.locked():\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n method_name + "_begin",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n if self_obj.locked():\n method_name += "_begin"\n else:\n method_name += "_end"\n elif method_name == "release":\n method_name += "_end"\n\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n 
task_name,\n "lock",\n method_name,\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n\n if isinstance(self_obj, asyncio.Queue):\n if method_name in ("put", "get", "_put", "_get"):\n task_id = self.get_task_id(frame)\n task_name = self.task_mgr.get(str(task_id))\n\n if method_name == "put":\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "acquire_begin",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n elif method_name == "_put":\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "acquire_end",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "release",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n elif method_name == "get":\n back = frame.f_back\n if back.f_code.co_name != "send":\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "acquire_begin",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n else:\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "acquire_end",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n send_concurrency_message(\n "asyncio_event",\n event_time,\n task_name,\n task_name,\n "lock",\n "release",\n frame.f_code.co_filename,\n frame.f_lineno,\n frame,\n lock_id=str(id(self_obj)),\n )\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_concurrency_analyser\pydevd_concurrency_logger.py
|
pydevd_concurrency_logger.py
|
Python
| 21,190 | 0.95 | 0.165975 | 0.06993 |
vue-tools
| 801 |
2024-06-14T20:55:57.494162
|
GPL-3.0
| false |
6645f2a5ea68b781551b23559f27a122
|
from _pydev_bundle._pydev_saved_modules import threading\n\n\ndef wrapper(fun):\n def pydev_after_run_call():\n pass\n\n def inner(*args, **kwargs):\n fun(*args, **kwargs)\n pydev_after_run_call()\n\n return inner\n\n\ndef wrap_attr(obj, attr):\n t_save_start = getattr(obj, attr)\n setattr(obj, attr, wrapper(t_save_start))\n obj._pydev_run_patched = True\n\n\nclass ObjectWrapper(object):\n def __init__(self, obj):\n self.wrapped_object = obj\n try:\n import functools\n\n functools.update_wrapper(self, obj)\n except:\n pass\n\n def __getattr__(self, attr):\n orig_attr = getattr(self.wrapped_object, attr) # .__getattribute__(attr)\n if callable(orig_attr):\n\n def patched_attr(*args, **kwargs):\n self.call_begin(attr)\n result = orig_attr(*args, **kwargs)\n self.call_end(attr)\n if result == self.wrapped_object:\n return self\n return result\n\n return patched_attr\n else:\n return orig_attr\n\n def call_begin(self, attr):\n pass\n\n def call_end(self, attr):\n pass\n\n def __enter__(self):\n self.call_begin("__enter__")\n self.wrapped_object.__enter__()\n self.call_end("__enter__")\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.call_begin("__exit__")\n self.wrapped_object.__exit__(exc_type, exc_val, exc_tb)\n\n\ndef factory_wrapper(fun):\n def inner(*args, **kwargs):\n obj = fun(*args, **kwargs)\n return ObjectWrapper(obj)\n\n return inner\n\n\ndef wrap_threads():\n # TODO: add wrappers for thread and _thread\n # import _thread as mod\n # print("Thread imported")\n # mod.start_new_thread = wrapper(mod.start_new_thread)\n threading.Lock = factory_wrapper(threading.Lock)\n threading.RLock = factory_wrapper(threading.RLock)\n\n # queue patching\n import queue # @UnresolvedImport\n\n queue.Queue = factory_wrapper(queue.Queue)\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_concurrency_analyser\pydevd_thread_wrappers.py
|
pydevd_thread_wrappers.py
|
Python
| 2,120 | 0.95 | 0.231707 | 0.084746 |
awesome-app
| 797 |
2024-11-01T15:28:58.398513
|
Apache-2.0
| false |
0e2ffbc1dc4aa220114cbcf7651169e0
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_concurrency_analyser\__pycache__\pydevd_concurrency_logger.cpython-313.pyc
|
pydevd_concurrency_logger.cpython-313.pyc
|
Other
| 17,468 | 0.8 | 0 | 0.007194 |
python-kit
| 21 |
2023-12-05T11:16:24.601336
|
GPL-3.0
| false |
647a86159edb4831b16643f25c605c3d
|
\n\n
|
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_concurrency_analyser\__pycache__\pydevd_thread_wrappers.cpython-313.pyc
|
pydevd_thread_wrappers.cpython-313.pyc
|
Other
| 3,903 | 0.8 | 0 | 0 |
node-utils
| 580 |
2025-04-22T00:17:47.432521
|
Apache-2.0
| false |
bc85613a55760834c03ab6d72c2ccc32
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.