column               dtype           values / range
content              stringlengths   1 to 103k
path                 stringlengths   8 to 216
filename             stringlengths   2 to 179
language             stringclasses   15 values
size_bytes           int64           2 to 189k
quality_score        float64         0.5 to 0.95
complexity           float64         0 to 1
documentation_ratio  float64         0 to 1
repository           stringclasses   5 values
stars                int64           0 to 1k
created_date         stringdate      2023-07-10 19:21:08 to 2025-07-09 19:11:45
license              stringclasses   4 values
is_test              bool            2 classes
file_hash            stringlengths   32 to 32
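A minimal sketch of how rows with this schema could be loaded and filtered locally, assuming the dataset has been exported to a Parquet file. The file name "code_files.parquet" and the filter thresholds are hypothetical; only the column names and dtypes come from the schema above.

import pandas as pd

# Hypothetical local export of the dataset described by the schema above.
df = pd.read_parquet("code_files.parquet")

# Keep non-test Python sources with a high quality score and some documentation.
python_rows = df[
    (df["language"] == "Python")
    & (~df["is_test"])
    & (df["quality_score"] >= 0.9)
    & (df["documentation_ratio"] > 0.0)
]

# Inspect the largest matching files with a few descriptive columns.
print(
    python_rows.sort_values("size_bytes", ascending=False)[
        ["filename", "repository", "license", "size_bytes", "quality_score"]
    ].head(10)
)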
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_concurrency_analyser\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
242
0.7
0
0
vue-tools
547
2024-11-21T08:58:38.902252
GPL-3.0
false
6c241cb55ca6cf5f11960aa7fe4da242
from _pydevd_bundle._debug_adapter.pydevd_schema_log import debug_exception\nimport json\nimport itertools\nfrom functools import partial\n\n\nclass BaseSchema(object):\n @staticmethod\n def initialize_ids_translation():\n BaseSchema._dap_id_to_obj_id = {0: 0, None: None}\n BaseSchema._obj_id_to_dap_id = {0: 0, None: None}\n BaseSchema._next_dap_id = partial(next, itertools.count(1))\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\n @staticmethod\n def _translate_id_to_dap(obj_id):\n if obj_id == "*":\n return "*"\n # Note: we don't invalidate ids, so, if some object starts using the same id\n # of another object, the same id will be used.\n dap_id = BaseSchema._obj_id_to_dap_id.get(obj_id)\n if dap_id is None:\n dap_id = BaseSchema._obj_id_to_dap_id[obj_id] = BaseSchema._next_dap_id()\n BaseSchema._dap_id_to_obj_id[dap_id] = obj_id\n return dap_id\n\n @staticmethod\n def _translate_id_from_dap(dap_id):\n if dap_id == "*":\n return "*"\n try:\n return BaseSchema._dap_id_to_obj_id[dap_id]\n except:\n raise KeyError("Wrong ID sent from the client: %s" % (dap_id,))\n\n @staticmethod\n def update_dict_ids_to_dap(dct):\n return dct\n\n @staticmethod\n def update_dict_ids_from_dap(dct):\n return dct\n\n\nBaseSchema.initialize_ids_translation()\n\n_requests_to_types = {}\n_responses_to_types = {}\n_event_to_types = {}\n_all_messages = {}\n\n\ndef register(cls):\n _all_messages[cls.__name__] = cls\n return cls\n\n\ndef register_request(command):\n def do_register(cls):\n _requests_to_types[command] = cls\n return cls\n\n return do_register\n\n\ndef register_response(command):\n def do_register(cls):\n _responses_to_types[command] = cls\n return cls\n\n return do_register\n\n\ndef register_event(event):\n def do_register(cls):\n _event_to_types[event] = cls\n return cls\n\n return do_register\n\n\ndef from_dict(dct, update_ids_from_dap=False):\n msg_type = dct.get("type")\n if msg_type is None:\n raise ValueError("Unable to make sense of message: %s" % (dct,))\n\n if msg_type == "request":\n to_type = _requests_to_types\n use = dct["command"]\n\n elif msg_type == "response":\n to_type = _responses_to_types\n use = dct["command"]\n\n else:\n to_type = _event_to_types\n use = dct["event"]\n\n cls = to_type.get(use)\n if cls is None:\n raise ValueError("Unable to create message from dict: %s. 
%s not in %s" % (dct, use, sorted(to_type.keys())))\n try:\n return cls(update_ids_from_dap=update_ids_from_dap, **dct)\n except:\n msg = "Error creating %s from %s" % (cls, dct)\n debug_exception(msg)\n raise\n\n\ndef from_json(json_msg, update_ids_from_dap=False, on_dict_loaded=lambda dct: None):\n if isinstance(json_msg, bytes):\n json_msg = json_msg.decode("utf-8")\n\n as_dict = json.loads(json_msg)\n on_dict_loaded(as_dict)\n try:\n return from_dict(as_dict, update_ids_from_dap=update_ids_from_dap)\n except:\n if as_dict.get("type") == "response" and not as_dict.get("success"):\n # Error messages may not have required body (return as a generic Response).\n Response = _all_messages["Response"]\n return Response(**as_dict)\n else:\n raise\n\n\ndef get_response_class(request):\n if request.__class__ == dict:\n return _responses_to_types[request["command"]]\n return _responses_to_types[request.command]\n\n\ndef build_response(request, kwargs=None):\n if kwargs is None:\n kwargs = {"success": True}\n else:\n if "success" not in kwargs:\n kwargs["success"] = True\n response_class = _responses_to_types[request.command]\n kwargs.setdefault("seq", -1) # To be overwritten before sending\n return response_class(command=request.command, request_seq=request.seq, **kwargs)\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\pydevd_base_schema.py
pydevd_base_schema.py
Python
4,143
0.95
0.230769
0.027523
awesome-app
240
2025-02-28T11:31:37.122063
Apache-2.0
false
5c5fbccd1fd52c103914ac7de1770b90
import os\nimport traceback\nfrom _pydevd_bundle.pydevd_constants import ForkSafeLock\n\n_pid = os.getpid()\n_pid_msg = "%s: " % (_pid,)\n\n_debug_lock = ForkSafeLock()\n\nDEBUG = False\nDEBUG_FILE = os.path.join(os.path.dirname(__file__), "__debug_output__.txt")\n\n\ndef debug(msg):\n if DEBUG:\n with _debug_lock:\n _pid_prefix = _pid_msg\n if isinstance(msg, bytes):\n _pid_prefix = _pid_prefix.encode("utf-8")\n\n if not msg.endswith(b"\r") and not msg.endswith(b"\n"):\n msg += b"\n"\n mode = "a+b"\n else:\n if not msg.endswith("\r") and not msg.endswith("\n"):\n msg += "\n"\n mode = "a+"\n with open(DEBUG_FILE, mode) as stream:\n stream.write(_pid_prefix)\n stream.write(msg)\n\n\ndef debug_exception(msg=None):\n if DEBUG:\n if msg:\n debug(msg)\n\n with _debug_lock:\n with open(DEBUG_FILE, "a+") as stream:\n _pid_prefix = _pid_msg\n if isinstance(msg, bytes):\n _pid_prefix = _pid_prefix.encode("utf-8")\n stream.write(_pid_prefix)\n\n traceback.print_exc(file=stream)\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\pydevd_schema_log.py
pydevd_schema_log.py
Python
1,299
0.85
0.2
0
node-utils
356
2025-03-27T07:33:51.667521
GPL-3.0
false
cdc5f56247e6aa773587973197051764
"""\nRun this module to regenerate the `pydevd_schema.py` file.\n\nNote that it'll generate it based on the current debugProtocol.json. Erase it and rerun\nto download the latest version.\n"""\n\n\ndef is_variable_to_translate(cls_name, var_name):\n if var_name in ("variablesReference", "frameId", "threadId"):\n return True\n\n if cls_name == "StackFrame" and var_name == "id":\n # It's frameId everywhere except on StackFrame.\n return True\n\n if cls_name == "Thread" and var_name == "id":\n # It's threadId everywhere except on Thread.\n return True\n\n return False\n\n\ndef _get_noqa_for_var(prop_name):\n return " # noqa (assign to builtin)" if prop_name in ("type", "format", "id", "hex", "breakpoint", "filter") else ""\n\n\nclass _OrderedSet(object):\n # Not a good ordered set (just something to be small without adding any deps)\n\n def __init__(self, initial_contents=None):\n self._contents = []\n self._contents_as_set = set()\n if initial_contents is not None:\n for x in initial_contents:\n self.add(x)\n\n def add(self, x):\n if x not in self._contents_as_set:\n self._contents_as_set.add(x)\n self._contents.append(x)\n\n def discard(self, x):\n if x in self._contents_as_set:\n self._contents_as_set.remove(x)\n self._contents.remove(x)\n\n def copy(self):\n return _OrderedSet(self._contents)\n\n def update(self, contents):\n for x in contents:\n self.add(x)\n\n def __iter__(self):\n return iter(self._contents)\n\n def __contains__(self, item):\n return item in self._contents_as_set\n\n def __len__(self):\n return len(self._contents)\n\n def set_repr(self):\n if len(self) == 0:\n return "set()"\n\n lst = [repr(x) for x in self]\n return "set([" + ", ".join(lst) + "])"\n\n\nclass Ref(object):\n def __init__(self, ref, ref_data):\n self.ref = ref\n self.ref_data = ref_data\n\n def __str__(self):\n return self.ref\n\n\ndef load_schema_data():\n import os.path\n import json\n\n json_file = os.path.join(os.path.dirname(__file__), "debugProtocol.json")\n if not os.path.exists(json_file):\n import requests\n\n req = requests.get("https://raw.githubusercontent.com/microsoft/debug-adapter-protocol/gh-pages/debugAdapterProtocol.json")\n assert req.status_code == 200\n with open(json_file, "wb") as stream:\n stream.write(req.content)\n\n with open(json_file, "rb") as json_contents:\n json_schema_data = json.loads(json_contents.read())\n return json_schema_data\n\n\ndef load_custom_schema_data():\n import os.path\n import json\n\n json_file = os.path.join(os.path.dirname(__file__), "debugProtocolCustom.json")\n\n with open(json_file, "rb") as json_contents:\n json_schema_data = json.loads(json_contents.read())\n return json_schema_data\n\n\ndef create_classes_to_generate_structure(json_schema_data):\n definitions = json_schema_data["definitions"]\n\n class_to_generatees = {}\n\n for name, definition in definitions.items():\n all_of = definition.get("allOf")\n description = definition.get("description")\n is_enum = definition.get("type") == "string" and "enum" in definition\n enum_values = None\n if is_enum:\n enum_values = definition["enum"]\n properties = {}\n properties.update(definition.get("properties", {}))\n required = _OrderedSet(definition.get("required", _OrderedSet()))\n base_definitions = []\n\n if all_of is not None:\n for definition in all_of:\n ref = definition.get("$ref")\n if ref is not None:\n assert ref.startswith("#/definitions/")\n ref = ref[len("#/definitions/") :]\n base_definitions.append(ref)\n else:\n if not description:\n description = definition.get("description")\n 
properties.update(definition.get("properties", {}))\n required.update(_OrderedSet(definition.get("required", _OrderedSet())))\n\n if isinstance(description, (list, tuple)):\n description = "\n".join(description)\n\n if name == "ModulesRequest": # Hack to accept modules request without arguments (ptvsd: 2050).\n required.discard("arguments")\n class_to_generatees[name] = dict(\n name=name,\n properties=properties,\n base_definitions=base_definitions,\n description=description,\n required=required,\n is_enum=is_enum,\n enum_values=enum_values,\n )\n return class_to_generatees\n\n\ndef collect_bases(curr_class, classes_to_generate, memo=None):\n ret = []\n if memo is None:\n memo = {}\n\n base_definitions = curr_class["base_definitions"]\n for base_definition in base_definitions:\n if base_definition not in memo:\n ret.append(base_definition)\n ret.extend(collect_bases(classes_to_generate[base_definition], classes_to_generate, memo))\n\n return ret\n\n\ndef fill_properties_and_required_from_base(classes_to_generate):\n # Now, resolve properties based on refs\n for class_to_generate in classes_to_generate.values():\n dct = {}\n s = _OrderedSet()\n\n for base_definition in reversed(collect_bases(class_to_generate, classes_to_generate)):\n # Note: go from base to current so that the initial order of the properties has that\n # same order.\n dct.update(classes_to_generate[base_definition].get("properties", {}))\n s.update(classes_to_generate[base_definition].get("required", _OrderedSet()))\n\n dct.update(class_to_generate["properties"])\n class_to_generate["properties"] = dct\n\n s.update(class_to_generate["required"])\n class_to_generate["required"] = s\n\n return class_to_generate\n\n\ndef update_class_to_generate_description(class_to_generate):\n import textwrap\n\n description = class_to_generate["description"]\n lines = []\n for line in description.splitlines():\n wrapped = textwrap.wrap(line.strip(), 100)\n lines.extend(wrapped)\n lines.append("")\n\n while lines and lines[-1] == "":\n lines = lines[:-1]\n\n class_to_generate["description"] = " " + ("\n ".join(lines))\n\n\ndef update_class_to_generate_type(classes_to_generate, class_to_generate):\n properties = class_to_generate.get("properties")\n for _prop_name, prop_val in properties.items():\n prop_type = prop_val.get("type", "")\n if not prop_type:\n prop_type = prop_val.pop("$ref", "")\n if prop_type:\n assert prop_type.startswith("#/definitions/")\n prop_type = prop_type[len("#/definitions/") :]\n prop_val["type"] = Ref(prop_type, classes_to_generate[prop_type])\n\n\ndef update_class_to_generate_register_dec(classes_to_generate, class_to_generate):\n # Default\n class_to_generate["register_request"] = ""\n class_to_generate["register_dec"] = "@register"\n\n properties = class_to_generate.get("properties")\n enum_type = properties.get("type", {}).get("enum")\n command = None\n event = None\n if enum_type and len(enum_type) == 1 and next(iter(enum_type)) in ("request", "response", "event"):\n msg_type = next(iter(enum_type))\n if msg_type == "response":\n # The actual command is typed in the request\n response_name = class_to_generate["name"]\n request_name = response_name[: -len("Response")] + "Request"\n if request_name in classes_to_generate:\n command = classes_to_generate[request_name]["properties"].get("command")\n else:\n if response_name == "ErrorResponse":\n command = {"enum": ["error"]}\n else:\n raise AssertionError("Unhandled: %s" % (response_name,))\n\n elif msg_type == "request":\n command = properties.get("command")\n\n 
elif msg_type == "event":\n command = properties.get("event")\n\n else:\n raise AssertionError("Unexpected condition.")\n\n if command:\n enum = command.get("enum")\n if enum and len(enum) == 1:\n class_to_generate["register_request"] = "@register_%s(%r)\n" % (msg_type, enum[0])\n\n\ndef extract_prop_name_and_prop(class_to_generate):\n properties = class_to_generate.get("properties")\n required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))\n\n # Sort so that required come first\n prop_name_and_prop = list(properties.items())\n\n def compute_sort_key(x):\n key = x[0]\n if key in required:\n if key == "seq":\n return 0.5 # seq when required is after the other required keys (to have a default of -1).\n return 0\n return 1\n\n prop_name_and_prop.sort(key=compute_sort_key)\n\n return prop_name_and_prop\n\n\ndef update_class_to_generate_to_json(class_to_generate):\n required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))\n prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)\n\n to_dict_body = ["def to_dict(self, update_ids_to_dap=False): # noqa (update_ids_to_dap may be unused)"]\n\n translate_prop_names = []\n for prop_name, prop in prop_name_and_prop:\n if is_variable_to_translate(class_to_generate["name"], prop_name):\n translate_prop_names.append(prop_name)\n\n for prop_name, prop in prop_name_and_prop:\n namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))\n to_dict_body.append(" %(prop_name)s = self.%(prop_name)s%(noqa)s" % namespace)\n\n if prop.get("type") == "array":\n to_dict_body.append(' if %(prop_name)s and hasattr(%(prop_name)s[0], "to_dict"):' % namespace)\n to_dict_body.append(" %(prop_name)s = [x.to_dict() for x in %(prop_name)s]" % namespace)\n\n if translate_prop_names:\n to_dict_body.append(" if update_ids_to_dap:")\n for prop_name in translate_prop_names:\n namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))\n to_dict_body.append(" if %(prop_name)s is not None:" % namespace)\n to_dict_body.append(" %(prop_name)s = self._translate_id_to_dap(%(prop_name)s)%(noqa)s" % namespace)\n\n if not translate_prop_names:\n update_dict_ids_from_dap_body = []\n else:\n update_dict_ids_from_dap_body = ["", "", "@classmethod", "def update_dict_ids_from_dap(cls, dct):"]\n for prop_name in translate_prop_names:\n namespace = dict(prop_name=prop_name)\n update_dict_ids_from_dap_body.append(" if %(prop_name)r in dct:" % namespace)\n update_dict_ids_from_dap_body.append(" dct[%(prop_name)r] = cls._translate_id_from_dap(dct[%(prop_name)r])" % namespace)\n update_dict_ids_from_dap_body.append(" return dct")\n\n class_to_generate["update_dict_ids_from_dap"] = _indent_lines("\n".join(update_dict_ids_from_dap_body))\n\n to_dict_body.append(" dct = {")\n first_not_required = False\n\n for prop_name, prop in prop_name_and_prop:\n use_to_dict = prop["type"].__class__ == Ref and not prop["type"].ref_data.get("is_enum", False)\n is_array = prop["type"] == "array"\n ref_array_cls_name = ""\n if is_array:\n ref = prop["items"].get("$ref")\n if ref is not None:\n ref_array_cls_name = ref.split("/")[-1]\n\n namespace = dict(prop_name=prop_name, ref_array_cls_name=ref_array_cls_name)\n if prop_name in required:\n if use_to_dict:\n to_dict_body.append(" %(prop_name)r: %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap)," % namespace)\n else:\n if ref_array_cls_name:\n to_dict_body.append(\n " %(prop_name)r: [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and 
%(prop_name)s) else %(prop_name)s,"\n % namespace\n )\n else:\n to_dict_body.append(" %(prop_name)r: %(prop_name)s," % namespace)\n else:\n if not first_not_required:\n first_not_required = True\n to_dict_body.append(" }")\n\n to_dict_body.append(" if %(prop_name)s is not None:" % namespace)\n if use_to_dict:\n to_dict_body.append(" dct[%(prop_name)r] = %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap)" % namespace)\n else:\n if ref_array_cls_name:\n to_dict_body.append(\n " dct[%(prop_name)r] = [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s"\n % namespace\n )\n else:\n to_dict_body.append(" dct[%(prop_name)r] = %(prop_name)s" % namespace)\n\n if not first_not_required:\n first_not_required = True\n to_dict_body.append(" }")\n\n to_dict_body.append(" dct.update(self.kwargs)")\n to_dict_body.append(" return dct")\n\n class_to_generate["to_dict"] = _indent_lines("\n".join(to_dict_body))\n\n if not translate_prop_names:\n update_dict_ids_to_dap_body = []\n else:\n update_dict_ids_to_dap_body = ["", "", "@classmethod", "def update_dict_ids_to_dap(cls, dct):"]\n for prop_name in translate_prop_names:\n namespace = dict(prop_name=prop_name)\n update_dict_ids_to_dap_body.append(" if %(prop_name)r in dct:" % namespace)\n update_dict_ids_to_dap_body.append(" dct[%(prop_name)r] = cls._translate_id_to_dap(dct[%(prop_name)r])" % namespace)\n update_dict_ids_to_dap_body.append(" return dct")\n\n class_to_generate["update_dict_ids_to_dap"] = _indent_lines("\n".join(update_dict_ids_to_dap_body))\n\n\ndef update_class_to_generate_init(class_to_generate):\n args = []\n init_body = []\n docstring = []\n\n required = _OrderedSet(class_to_generate.get("required", _OrderedSet()))\n prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)\n\n translate_prop_names = []\n for prop_name, prop in prop_name_and_prop:\n if is_variable_to_translate(class_to_generate["name"], prop_name):\n translate_prop_names.append(prop_name)\n\n enum = prop.get("enum")\n if enum and len(enum) == 1:\n init_body.append(" self.%(prop_name)s = %(enum)r" % dict(prop_name=prop_name, enum=next(iter(enum))))\n else:\n if prop_name in required:\n if prop_name == "seq":\n args.append(prop_name + "=-1")\n else:\n args.append(prop_name)\n else:\n args.append(prop_name + "=None")\n\n if prop["type"].__class__ == Ref:\n ref = prop["type"]\n ref_data = ref.ref_data\n if ref_data.get("is_enum", False):\n init_body.append(" if %s is not None:" % (prop_name,))\n init_body.append(" assert %s in %s.VALID_VALUES" % (prop_name, str(ref)))\n init_body.append(" self.%(prop_name)s = %(prop_name)s" % dict(prop_name=prop_name))\n else:\n namespace = dict(prop_name=prop_name, ref_name=str(ref))\n init_body.append(" if %(prop_name)s is None:" % namespace)\n init_body.append(" self.%(prop_name)s = %(ref_name)s()" % namespace)\n init_body.append(" else:")\n init_body.append(\n " self.%(prop_name)s = %(ref_name)s(update_ids_from_dap=update_ids_from_dap, **%(prop_name)s) if %(prop_name)s.__class__ != %(ref_name)s else %(prop_name)s"\n % namespace\n )\n\n else:\n init_body.append(" self.%(prop_name)s = %(prop_name)s" % dict(prop_name=prop_name))\n\n if prop["type"] == "array":\n ref = prop["items"].get("$ref")\n if ref is not None:\n ref_array_cls_name = ref.split("/")[-1]\n init_body.append(" if update_ids_from_dap and self.%(prop_name)s:" % dict(prop_name=prop_name))\n init_body.append(" for o in self.%(prop_name)s:" % dict(prop_name=prop_name))\n 
init_body.append(\n " %(ref_array_cls_name)s.update_dict_ids_from_dap(o)" % dict(ref_array_cls_name=ref_array_cls_name)\n )\n\n prop_type = prop["type"]\n prop_description = prop.get("description", "")\n\n if isinstance(prop_description, (list, tuple)):\n prop_description = "\n ".join(prop_description)\n\n docstring.append(\n ":param %(prop_type)s %(prop_name)s: %(prop_description)s"\n % dict(prop_type=prop_type, prop_name=prop_name, prop_description=prop_description)\n )\n\n if translate_prop_names:\n init_body.append(" if update_ids_from_dap:")\n for prop_name in translate_prop_names:\n init_body.append(" self.%(prop_name)s = self._translate_id_from_dap(self.%(prop_name)s)" % dict(prop_name=prop_name))\n\n docstring = _indent_lines("\n".join(docstring))\n init_body = "\n".join(init_body)\n\n # Actually bundle the whole __init__ from the parts.\n args = ", ".join(args)\n if args:\n args = ", " + args\n\n # Note: added kwargs because some messages are expected to be extended by the user (so, we'll actually\n # make all extendable so that we don't have to worry about which ones -- we loose a little on typing,\n # but may be better than doing a allow list based on something only pointed out in the documentation).\n class_to_generate[\n "init"\n ] = '''def __init__(self%(args)s, update_ids_from_dap=False, **kwargs): # noqa (update_ids_from_dap may be unused)\n """\n%(docstring)s\n """\n%(init_body)s\n self.kwargs = kwargs\n''' % dict(args=args, init_body=init_body, docstring=docstring)\n\n class_to_generate["init"] = _indent_lines(class_to_generate["init"])\n\n\ndef update_class_to_generate_props(class_to_generate):\n import json\n\n def default(o):\n if isinstance(o, Ref):\n return o.ref\n raise AssertionError("Unhandled: %s" % (o,))\n\n properties = class_to_generate["properties"]\n class_to_generate["props"] = (\n " __props__ = %s" % _indent_lines(json.dumps(properties, indent=4, default=default).replace("true", "True")).strip()\n )\n\n\ndef update_class_to_generate_refs(class_to_generate):\n properties = class_to_generate["properties"]\n class_to_generate["refs"] = (\n " __refs__ = %s" % _OrderedSet(key for (key, val) in properties.items() if val["type"].__class__ == Ref).set_repr()\n )\n\n\ndef update_class_to_generate_enums(class_to_generate):\n class_to_generate["enums"] = ""\n if class_to_generate.get("is_enum", False):\n enums = ""\n for enum in class_to_generate["enum_values"]:\n enums += " %s = %r\n" % (enum.upper(), enum)\n enums += "\n"\n enums += " VALID_VALUES = %s\n\n" % _OrderedSet(class_to_generate["enum_values"]).set_repr()\n class_to_generate["enums"] = enums\n\n\ndef update_class_to_generate_objects(classes_to_generate, class_to_generate):\n properties = class_to_generate["properties"]\n for key, val in properties.items():\n if "type" not in val:\n val["type"] = "TypeNA"\n continue\n\n if val["type"] == "object":\n create_new = val.copy()\n create_new.update(\n {\n "name": "%s%s" % (class_to_generate["name"], key.title()),\n "description": ' "%s" of %s' % (key, class_to_generate["name"]),\n }\n )\n if "properties" not in create_new:\n create_new["properties"] = {}\n\n assert create_new["name"] not in classes_to_generate\n classes_to_generate[create_new["name"]] = create_new\n\n update_class_to_generate_type(classes_to_generate, create_new)\n update_class_to_generate_props(create_new)\n\n # Update nested object types\n update_class_to_generate_objects(classes_to_generate, create_new)\n\n val["type"] = Ref(create_new["name"], classes_to_generate[create_new["name"]])\n 
val.pop("properties", None)\n\n\ndef gen_debugger_protocol():\n import os.path\n import sys\n\n if sys.version_info[:2] < (3, 6):\n raise AssertionError("Must be run with Python 3.6 onwards (to keep dict order).")\n\n classes_to_generate = create_classes_to_generate_structure(load_schema_data())\n classes_to_generate.update(create_classes_to_generate_structure(load_custom_schema_data()))\n\n class_to_generate = fill_properties_and_required_from_base(classes_to_generate)\n\n for class_to_generate in list(classes_to_generate.values()):\n update_class_to_generate_description(class_to_generate)\n update_class_to_generate_type(classes_to_generate, class_to_generate)\n update_class_to_generate_props(class_to_generate)\n update_class_to_generate_objects(classes_to_generate, class_to_generate)\n\n for class_to_generate in classes_to_generate.values():\n update_class_to_generate_refs(class_to_generate)\n update_class_to_generate_init(class_to_generate)\n update_class_to_generate_enums(class_to_generate)\n update_class_to_generate_to_json(class_to_generate)\n update_class_to_generate_register_dec(classes_to_generate, class_to_generate)\n\n class_template = '''\n%(register_request)s%(register_dec)s\nclass %(name)s(BaseSchema):\n """\n%(description)s\n\n Note: automatically generated code. Do not edit manually.\n """\n\n%(enums)s%(props)s\n%(refs)s\n\n __slots__ = list(__props__.keys()) + ['kwargs']\n\n%(init)s%(update_dict_ids_from_dap)s\n\n%(to_dict)s%(update_dict_ids_to_dap)s\n'''\n\n contents = []\n contents.append("# coding: utf-8")\n contents.append("# Automatically generated code.")\n contents.append("# Do not edit manually.")\n contents.append("# Generated by running: %s" % os.path.basename(__file__))\n contents.append("from .pydevd_base_schema import BaseSchema, register, register_request, register_response, register_event")\n contents.append("")\n for class_to_generate in classes_to_generate.values():\n contents.append(class_template % class_to_generate)\n\n parent_dir = os.path.dirname(__file__)\n schema = os.path.join(parent_dir, "pydevd_schema.py")\n with open(schema, "w", encoding="utf-8") as stream:\n stream.write("\n".join(contents))\n\n\ndef _indent_lines(lines, indent=" "):\n out_lines = []\n for line in lines.splitlines(keepends=True):\n out_lines.append(indent + line)\n\n return "".join(out_lines)\n\n\nif __name__ == "__main__":\n gen_debugger_protocol()\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\__main__pydevd_gen_debug_adapter_protocol.py
__main__pydevd_gen_debug_adapter_protocol.py
Python
23,959
0.95
0.234323
0.029915
awesome-app
628
2024-05-07T00:04:32.778496
MIT
false
d402399e26e4d4152468d306db46eddb
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\__pycache__\pydevd_base_schema.cpython-313.pyc
pydevd_base_schema.cpython-313.pyc
Other
6,346
0.8
0
0
vue-tools
515
2025-04-25T23:47:58.166161
BSD-3-Clause
false
6cf82e58f7d342f82a39db0f2d5bb128
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\__pycache__\pydevd_schema_log.cpython-313.pyc
pydevd_schema_log.cpython-313.pyc
Other
2,433
0.8
0
0
python-kit
242
2024-02-21T14:03:59.145280
Apache-2.0
false
32914c0047c6d2d145d1204221aa272a
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
229
0.7
0
0
python-kit
642
2024-12-22T06:30:56.646026
GPL-3.0
false
10dfb6ce75bc297f0a6dfa6e05bf4a16
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\_debug_adapter\__pycache__\__main__pydevd_gen_debug_adapter_protocol.cpython-313.pyc
__main__pydevd_gen_debug_adapter_protocol.cpython-313.pyc
Other
27,759
0.95
0.090909
0
react-lib
697
2025-02-11T22:34:07.992221
BSD-3-Clause
false
136e48f1409c1fa1b9eac2634a678ea5
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevconsole_code.cpython-313.pyc
pydevconsole_code.cpython-313.pyc
Other
19,777
0.95
0.094545
0
python-kit
269
2024-08-18T09:57:26.474687
MIT
false
f8fa2153d6903500531686ebbb073780
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_additional_thread_info.cpython-313.pyc
pydevd_additional_thread_info.cpython-313.pyc
Other
907
0.7
0
0
vue-tools
111
2025-06-06T11:49:18.652455
GPL-3.0
false
3f19254dc8b3191d803fd5977014b323
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_additional_thread_info_regular.cpython-313.pyc
pydevd_additional_thread_info_regular.cpython-313.pyc
Other
8,108
0.8
0.029412
0
python-kit
406
2025-01-30T03:13:45.503230
GPL-3.0
false
003f4a438824f2ca0985813f2a178c74
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_api.cpython-313.pyc
pydevd_api.cpython-313.pyc
Other
54,753
0.75
0.064815
0.002475
node-utils
674
2024-08-06T05:11:33.348012
BSD-3-Clause
false
3e0e032dc74d65bec0a903475b72bccf
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_breakpoints.cpython-313.pyc
pydevd_breakpoints.cpython-313.pyc
Other
8,151
0.95
0
0.014925
react-lib
378
2024-11-25T13:28:44.141177
BSD-3-Clause
false
1889a080049dd12d92e42c3cd3d4db2e
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_bytecode_utils.cpython-313.pyc
pydevd_bytecode_utils.cpython-313.pyc
Other
38,171
0.95
0.02583
0.003906
node-utils
800
2023-10-11T19:07:23.626280
GPL-3.0
false
e1c09dd9aeaa1c87dff59237a30a2ade
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_bytecode_utils_py311.cpython-313.pyc
pydevd_bytecode_utils_py311.cpython-313.pyc
Other
4,778
0.8
0.116279
0
react-lib
324
2024-11-20T06:31:08.946385
MIT
false
954384c46f7930af900b20a9ea6eb5fd
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_code_to_source.cpython-313.pyc
pydevd_code_to_source.cpython-313.pyc
Other
27,930
0.95
0.046332
0
awesome-app
310
2024-02-29T02:47:46.773716
MIT
false
5b0114a977384d2bf750ac36d1fee718
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_collect_bytecode_info.cpython-313.pyc
pydevd_collect_bytecode_info.cpython-313.pyc
Other
35,732
0.8
0.035156
0
awesome-app
701
2024-12-18T11:27:53.845725
BSD-3-Clause
false
df6a4591468517dc899384d2aff30847
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_comm.cpython-313.pyc
pydevd_comm.cpython-313.pyc
Other
90,535
0.75
0.032043
0.004323
awesome-app
362
2024-09-06T02:49:07.959623
MIT
false
a4618be02f58935e3e4865340122a065
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_command_line_handling.cpython-313.pyc
pydevd_command_line_handling.cpython-313.pyc
Other
6,937
0.95
0.025641
0.027397
python-kit
6
2025-01-21T22:00:47.781895
GPL-3.0
false
f97a4a9c1a4dc52e9a6034228b55f805
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_comm_constants.cpython-313.pyc
pydevd_comm_constants.cpython-313.pyc
Other
5,945
0.8
0
0.065217
node-utils
58
2024-07-23T19:51:19.093169
Apache-2.0
false
e1a094ba30c2f1708a030fb3ac85e729
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_console.cpython-313.pyc
pydevd_console.cpython-313.pyc
Other
11,569
0.95
0.06
0
react-lib
115
2025-05-19T18:43:43.821980
BSD-3-Clause
false
68b4310c74c21cbfb522d26fffcf1b8f
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_constants.cpython-313.pyc
pydevd_constants.cpython-313.pyc
Other
29,319
0.95
0.047414
0.013825
node-utils
677
2023-08-06T12:21:32.714584
BSD-3-Clause
false
b9b312427eea12776cd02ef1e708c0ed
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_custom_frames.cpython-313.pyc
pydevd_custom_frames.cpython-313.pyc
Other
4,983
0.8
0.053571
0
node-utils
361
2025-03-18T03:57:26.399153
Apache-2.0
false
1a4bcb85455a9fac356e5fc348665e30
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_cython_wrapper.cpython-313.pyc
pydevd_cython_wrapper.cpython-313.pyc
Other
1,911
0.95
0
0
python-kit
29
2024-08-22T19:20:37.640453
Apache-2.0
false
0b2a8b79368713e08ae52c52aa045b76
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_daemon_thread.cpython-313.pyc
pydevd_daemon_thread.cpython-313.pyc
Other
9,815
0.95
0.0625
0.032787
node-utils
525
2023-10-02T21:32:24.098283
GPL-3.0
false
8657b26a295c9d76d8194161225dcd6b
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_defaults.cpython-313.pyc
pydevd_defaults.cpython-313.pyc
Other
2,749
0.95
0.038462
0
python-kit
504
2023-11-30T07:13:49.157685
MIT
false
10e9568ff4802b905593c558fa65777f
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_dont_trace.cpython-313.pyc
pydevd_dont_trace.cpython-313.pyc
Other
2,699
0.95
0.117647
0
node-utils
734
2023-10-19T19:25:35.805259
BSD-3-Clause
false
c40e0fc9619ab7e38c0350846546bcba
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_dont_trace_files.cpython-313.pyc
pydevd_dont_trace_files.cpython-313.pyc
Other
6,377
0.95
0
0.02439
awesome-app
406
2025-01-08T10:42:34.539825
BSD-3-Clause
false
e46d9b42a54c6b98d63a565eccf41734
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_exec2.cpython-313.pyc
pydevd_exec2.cpython-313.pyc
Other
431
0.7
0
0
react-lib
336
2023-11-04T07:05:41.339006
MIT
false
a7a266e79bf2ec9af8eae8e70a2a9702
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_extension_api.cpython-313.pyc
pydevd_extension_api.cpython-313.pyc
Other
5,459
0.95
0.125
0
node-utils
903
2025-04-13T16:03:46.727460
GPL-3.0
false
87081377f048f671224c17285bb3190c
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_extension_utils.cpython-313.pyc
pydevd_extension_utils.cpython-313.pyc
Other
3,894
0.95
0.026316
0
react-lib
554
2023-09-16T15:35:44.442907
MIT
false
4fb9aa0af61d6f96e94755821ecf7995
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_filtering.cpython-313.pyc
pydevd_filtering.cpython-313.pyc
Other
15,224
0.95
0.035398
0
awesome-app
715
2024-05-05T23:44:55.036902
BSD-3-Clause
false
b665fcc01dbac1adac5c7888fd8bcf1b
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_frame.cpython-313.pyc
pydevd_frame.cpython-313.pyc
Other
37,935
0.95
0.021552
0
python-kit
230
2025-01-02T12:25:48.325913
MIT
false
0f1e7c9d11a49ff363577d55f8ec8adb
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_frame_utils.cpython-313.pyc
pydevd_frame_utils.cpython-313.pyc
Other
18,496
0.95
0.01676
0
node-utils
492
2023-12-14T15:09:34.189645
BSD-3-Clause
false
bd65e292a65793d5568813e95c3c9e4e
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_gevent_integration.cpython-313.pyc
pydevd_gevent_integration.cpython-313.pyc
Other
4,417
0.8
0.045455
0
vue-tools
153
2025-02-13T00:45:52.756831
MIT
false
68e7b09f9005d28cd8cc929e99b4e937
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_import_class.cpython-313.pyc
pydevd_import_class.cpython-313.pyc
Other
2,136
0.95
0
0
vue-tools
41
2024-02-09T21:09:13.807911
MIT
false
6bc9fe2db6f77ac798f7f21630c1a2dc
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_io.cpython-313.pyc
pydevd_io.cpython-313.pyc
Other
11,490
0.95
0.025641
0.063636
python-kit
79
2024-09-03T10:04:57.096930
GPL-3.0
false
473bba5d1294f58e68c3cc160cd12a97
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_json_debug_options.cpython-313.pyc
pydevd_json_debug_options.cpython-313.pyc
Other
7,206
0.8
0.014925
0.017241
node-utils
105
2024-06-07T21:59:17.763589
Apache-2.0
false
63a1eefbe6d02400112735eef47fc223
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_net_command.cpython-313.pyc
pydevd_net_command.cpython-313.pyc
Other
6,149
0.8
0
0
python-kit
244
2025-03-26T02:08:11.897170
BSD-3-Clause
false
5b9731d7930f71ac9536d7466c857005
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_net_command_factory_json.cpython-313.pyc
pydevd_net_command_factory_json.cpython-313.pyc
Other
28,088
0.95
0.037736
0
awesome-app
746
2025-06-24T23:31:03.421608
Apache-2.0
false
0412cfaac543c5d40ee713b80bd688fb
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_net_command_factory_xml.cpython-313.pyc
pydevd_net_command_factory_xml.cpython-313.pyc
Other
29,057
0.95
0.032051
0
vue-tools
104
2024-01-15T05:01:01.972421
Apache-2.0
false
c74d094a71223a4be152655b50cff701
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_plugin_utils.cpython-313.pyc
pydevd_plugin_utils.cpython-313.pyc
Other
8,980
0.95
0.038462
0
node-utils
272
2024-01-18T16:45:31.247900
BSD-3-Clause
false
6cc64094964e707879375fe1872eaadd
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_process_net_command.cpython-313.pyc
pydevd_process_net_command.cpython-313.pyc
Other
35,326
0.8
0
0
awesome-app
677
2025-05-29T11:00:25.097215
MIT
false
0d1e0998306ee32d799503b4a8b0a9b5
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_process_net_command_json.cpython-313.pyc
pydevd_process_net_command_json.cpython-313.pyc
Other
60,807
0.75
0.026087
0.015873
python-kit
440
2024-04-24T00:23:44.801065
GPL-3.0
false
f136cc2e7e92a3bf59ec6a1ebcb2e7f5
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_referrers.cpython-313.pyc
pydevd_referrers.cpython-313.pyc
Other
9,483
0.8
0.047945
0.008547
react-lib
994
2024-12-22T06:07:32.001360
Apache-2.0
false
e012fa2d20971184084cf8f2559153cc
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_reload.cpython-313.pyc
pydevd_reload.cpython-313.pyc
Other
14,408
0.95
0.106145
0
awesome-app
74
2024-03-23T16:55:16.210932
Apache-2.0
false
85ab77a7b786f2e8bb26ab21bb6ea0c8
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_resolver.cpython-313.pyc
pydevd_resolver.cpython-313.pyc
Other
33,209
0.95
0.003968
0.012658
node-utils
623
2025-02-28T18:59:49.915935
BSD-3-Clause
false
70c4508ea04f00211c3732e4f165cf57
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_runpy.cpython-313.pyc
pydevd_runpy.cpython-313.pyc
Other
15,002
0.95
0.063584
0
python-kit
173
2024-08-26T10:05:21.630217
Apache-2.0
false
c64eb67facbf7be541c33a4619a8a65d
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_safe_repr.cpython-313.pyc
pydevd_safe_repr.cpython-313.pyc
Other
13,621
0.8
0.023077
0
vue-tools
261
2023-07-18T00:21:05.338207
BSD-3-Clause
false
21762afbecf14e768701029ecf2f5f56
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_save_locals.cpython-313.pyc
pydevd_save_locals.cpython-313.pyc
Other
3,905
0.95
0.046512
0.02439
awesome-app
243
2025-02-17T12:55:16.371219
Apache-2.0
false
d668ac55a731f2d2ecb7c4714b4f5628
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_signature.cpython-313.pyc
pydevd_signature.cpython-313.pyc
Other
10,335
0.95
0.010204
0
react-lib
380
2023-10-13T06:13:16.940821
GPL-3.0
false
0a42e2e167ae81139749baa96ee02b3a
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_source_mapping.cpython-313.pyc
pydevd_source_mapping.cpython-313.pyc
Other
7,837
0.8
0.047619
0
awesome-app
162
2024-07-15T03:03:31.263378
BSD-3-Clause
false
264dcec83fa7285efc423953442be782
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_stackless.cpython-313.pyc
pydevd_stackless.cpython-313.pyc
Other
13,945
0.95
0.012821
0
node-utils
369
2024-10-16T06:17:33.918264
GPL-3.0
false
28e234e0bddc58c87be87cb7b5d5dfca
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_suspended_frames.cpython-313.pyc
pydevd_suspended_frames.cpython-313.pyc
Other
23,877
0.95
0.030612
0
react-lib
921
2025-02-05T13:06:34.249981
BSD-3-Clause
false
b56d058470a9e18424dbfb4ed142ad0b
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_thread_lifecycle.cpython-313.pyc
pydevd_thread_lifecycle.cpython-313.pyc
Other
4,528
0.8
0
0
awesome-app
497
2024-11-15T05:18:06.807209
Apache-2.0
false
520f34c4251ad8822a33c5de740da31f
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_timeout.cpython-313.pyc
pydevd_timeout.cpython-313.pyc
Other
10,787
0.95
0.076087
0
python-kit
848
2023-12-23T09:41:37.204787
BSD-3-Clause
false
61e5a0266399c5ea2a1baf1562c3045e
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_traceproperty.cpython-313.pyc
pydevd_traceproperty.cpython-313.pyc
Other
3,980
0.8
0.090909
0
vue-tools
658
2024-07-24T03:25:42.734642
BSD-3-Clause
false
b9b86d7649e39c97519c6652afe9d844
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_trace_dispatch.cpython-313.pyc
pydevd_trace_dispatch.cpython-313.pyc
Other
3,707
0.8
0
0
node-utils
974
2025-04-12T07:15:33.011092
GPL-3.0
false
5a20b1f1abd9ca6b02b2259f333f6c43
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_trace_dispatch_regular.cpython-313.pyc
pydevd_trace_dispatch_regular.cpython-313.pyc
Other
14,803
0.8
0.022059
0.015504
react-lib
552
2025-01-01T03:54:55.677923
BSD-3-Clause
false
23d4f086dbba440b3a2d1512397d41ca
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_utils.cpython-313.pyc
pydevd_utils.cpython-313.pyc
Other
21,211
0.95
0.042683
0
react-lib
895
2023-11-14T18:33:03.275035
MIT
false
bb0561ae12c1a2529963a5fbdd45c135
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_vars.cpython-313.pyc
pydevd_vars.cpython-313.pyc
Other
32,944
0.95
0.036011
0
vue-tools
473
2025-05-29T08:08:20.250880
Apache-2.0
false
c03bf34aaa2b0663c92c63ad75964620
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_vm_type.cpython-313.pyc
pydevd_vm_type.cpython-313.pyc
Other
1,344
0.8
0
0
python-kit
421
2024-09-15T22:11:02.318554
BSD-3-Clause
false
5e717bc1d65c5e4eea8e776746822579
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\pydevd_xml.cpython-313.pyc
pydevd_xml.cpython-313.pyc
Other
16,840
0.95
0.006803
0
awesome-app
142
2023-08-14T01:12:11.667145
MIT
false
a33ab4adce8b772f4615c3cb29ecaccb
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_bundle\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
214
0.7
0
0
awesome-app
836
2024-01-01T07:53:03.924350
Apache-2.0
false
18c82993d450443ef840614b4c337bd4
/pydevd_frame_evaluator.*.so\n/pydevd_frame_evaluator.*.pyd\n/pydevd_frame_evaluator.pyx\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\.gitignore
.gitignore
Other
90
0.5
0
0
vue-tools
16
2024-03-08T23:42:58.964913
BSD-3-Clause
false
3979304ccc70a4cd3dd9b723504f8ac0
from cpython.mem cimport PyMem_Malloc, PyMem_Free\n\ncdef extern from *:\n ctypedef void PyObject\n ctypedef struct PyCodeObject:\n int co_argcount; # arguments, except *args */\n int co_kwonlyargcount; # keyword only arguments */\n int co_nlocals; # local variables */\n int co_stacksize; # entries needed for evaluation stack */\n int co_flags; # CO_..., see below */\n int co_firstlineno; # first source line number */\n PyObject *co_code; # instruction opcodes */\n PyObject *co_consts; # list (constants used) */\n PyObject *co_names; # list of strings (names used) */\n PyObject *co_varnames; # tuple of strings (local variable names) */\n PyObject *co_freevars; # tuple of strings (free variable names) */\n PyObject *co_cellvars; # tuple of strings (cell variable names) */\n unsigned char *co_cell2arg; # Maps cell vars which are arguments. */\n PyObject *co_filename; # unicode (where it was loaded from) */\n PyObject *co_name; # unicode (name, for reference) */\n PyObject *co_lnotab; # string (encoding addr<->lineno mapping) See\n # Objects/lnotab_notes.txt for details. */\n void *co_zombieframe; # for optimization only (see frameobject.c) */\n PyObject *co_weakreflist; # to support weakrefs to code objects */\n void *co_extra;\n\ncdef extern from "frameobject.h":\n ctypedef struct PyFrameObject:\n PyFrameObject *f_back\n PyCodeObject *f_code # code segment\n PyObject *f_builtins # builtin symbol table (PyDictObject)\n PyObject *f_globals # global symbol table (PyDictObject) */\n PyObject *f_locals # local symbol table (any mapping) */\n PyObject **f_valuestack #\n PyObject **f_stacktop\n PyObject *f_trace # Trace function */\n PyObject *f_exc_type\n PyObject *f_exc_value\n PyObject *f_exc_traceback\n PyObject *f_gen;\n\n int f_lasti; #/* Last instruction if called */\n int f_lineno; #/* Current line number */\n int f_iblock; #/* index in f_blockstack */\n char f_executing; #/* whether the frame is still executing */\n PyObject *f_localsplus[1];\n\ncdef extern from "release_mem.h":\n void release_co_extra(void *)\n\ncdef extern from "code.h":\n ctypedef void freefunc(void *)\n int _PyCode_GetExtra(PyObject *code, Py_ssize_t index, void **extra)\n int _PyCode_SetExtra(PyObject *code, Py_ssize_t index, void *extra)\n \n# TODO: Things are in a different place for Python 3.11.\n# cdef extern from "cpython/code.h":\n# ctypedef void freefunc(void *)\n# int _PyCode_GetExtra(PyObject *code, Py_ssize_t index, void **extra)\n# int _PyCode_SetExtra(PyObject *code, Py_ssize_t index, void *extra)\n\ncdef extern from "Python.h":\n void Py_INCREF(object o)\n void Py_DECREF(object o)\n object PyImport_ImportModule(char *name)\n PyObject* PyObject_CallFunction(PyObject *callable, const char *format, ...)\n object PyObject_GetAttrString(object o, char *attr_name)\n\ncdef extern from "pystate.h":\n # ctypedef PyObject* _PyFrameEvalFunction(PyThreadState* tstate, PyFrameObject *frame, int exc)\n # ctypedef PyObject* _PyFrameEvalFunction(PyFrameObject *frame, int exc)\n ctypedef PyObject* _PyFrameEvalFunction(...)\n\n ctypedef struct PyInterpreterState:\n PyInterpreterState *next\n PyInterpreterState *tstate_head\n\n PyObject *modules\n\n PyObject *modules_by_index\n PyObject *sysdict\n PyObject *builtins\n PyObject *importlib\n\n PyObject *codec_search_path\n PyObject *codec_search_cache\n PyObject *codec_error_registry\n int codecs_initialized\n int fscodec_initialized\n\n int dlopenflags\n\n PyObject *builtins_copy\n PyObject *import_func\n # Initialized to PyEval_EvalFrameDefault().\n _PyFrameEvalFunction 
eval_frame\n\n ctypedef struct PyThreadState:\n PyThreadState *prev\n PyThreadState *next\n PyInterpreterState *interp\n # ...\n\n PyThreadState *PyThreadState_Get()\n\ncdef extern from "ceval.h":\n '''\n#if PY_VERSION_HEX >= 0x03090000\nPyObject * noop(PyFrameObject *frame, int exc) {\n return NULL;\n}\n#define CALL_EvalFrameDefault_38(a, b) noop(a, b)\n#define CALL_EvalFrameDefault_39(a, b, c) _PyEval_EvalFrameDefault(a, b, c)\n#else\nPyObject * noop(PyThreadState* tstate, PyFrameObject *frame, int exc) {\n return NULL;\n}\n#define CALL_EvalFrameDefault_39(a, b, c) noop(a, b, c)\n#define CALL_EvalFrameDefault_38(a, b) _PyEval_EvalFrameDefault(a, b)\n#endif\n '''\n\n int _PyEval_RequestCodeExtraIndex(freefunc)\n PyFrameObject *PyEval_GetFrame()\n PyObject* PyEval_CallFunction(PyObject *callable, const char *format, ...)\n\n # PyObject* _PyEval_EvalFrameDefault(PyThreadState* tstate, PyFrameObject *frame, int exc)\n # PyObject* _PyEval_EvalFrameDefault(PyFrameObject *frame, int exc)\n PyObject* _PyEval_EvalFrameDefault(...)\n PyObject* CALL_EvalFrameDefault_38(PyFrameObject *frame, int exc) # Actually a macro.\n PyObject* CALL_EvalFrameDefault_39(PyThreadState* tstate, PyFrameObject *frame, int exc) # Actually a macro.\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_frame_evaluator.pxd
pydevd_frame_evaluator.pxd
Other
5,455
0.95
0.061069
0.169643
react-lib
352
2024-09-07T05:34:34.180983
MIT
false
31d449bc7c66c72582a7f05857463119
from __future__ import print_function\nfrom _pydev_bundle._pydev_saved_modules import threading, thread\nfrom _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder\nimport dis\nimport sys\nfrom _pydevd_frame_eval.pydevd_frame_tracing import update_globals_dict, dummy_tracing_holder\nfrom _pydevd_frame_eval.pydevd_modify_bytecode import DebugHelper, insert_pydevd_breaks\nfrom pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER\nfrom _pydevd_bundle.pydevd_trace_dispatch import fix_top_level_trace_and_get_trace_func\n\nfrom _pydevd_bundle.pydevd_additional_thread_info import _set_additional_thread_info_lock\nfrom _pydevd_bundle.pydevd_cython cimport PyDBAdditionalThreadInfo\nfrom pydevd_tracing import SetTrace\n\n_get_ident = threading.get_ident # Note this is py3 only, if py2 needed to be supported, _get_ident would be needed.\n_thread_local_info = threading.local()\n_thread_active = threading._active\n\ndef clear_thread_local_info():\n global _thread_local_info\n _thread_local_info = threading.local()\n\n\ncdef class ThreadInfo:\n\n cdef public PyDBAdditionalThreadInfo additional_info\n cdef public bint is_pydevd_thread\n cdef public int inside_frame_eval\n cdef public bint fully_initialized\n cdef public object thread_trace_func\n cdef bint _can_create_dummy_thread\n\n # Note: whenever get_func_code_info is called, this value is reset (we're using\n # it as a thread-local value info).\n # If True the debugger should not go into trace mode even if the new\n # code for a function is None and there are breakpoints.\n cdef public bint force_stay_in_untraced_mode\n\n cdef initialize(self, PyFrameObject * frame_obj):\n # Places that create a ThreadInfo should verify that\n # a current Python frame is being executed!\n assert frame_obj != NULL\n\n self.additional_info = None\n self.is_pydevd_thread = False\n self.inside_frame_eval = 0\n self.fully_initialized = False\n self.thread_trace_func = None\n\n # Get the root (if it's not a Thread initialized from the threading\n # module, create the dummy thread entry so that we can debug it --\n # otherwise, we have to wait for the threading module itself to\n # create the Thread entry).\n while frame_obj.f_back != NULL:\n frame_obj = frame_obj.f_back\n\n basename = <str> frame_obj.f_code.co_filename\n i = basename.rfind('/')\n j = basename.rfind('\\')\n if j > i:\n i = j\n if i >= 0:\n basename = basename[i + 1:]\n # remove ext\n i = basename.rfind('.')\n if i >= 0:\n basename = basename[:i]\n\n co_name = <str> frame_obj.f_code.co_name\n\n # In these cases we cannot create a dummy thread (an actual\n # thread will be created later or tracing will already be set).\n if basename == 'threading' and co_name in ('__bootstrap', '_bootstrap', '__bootstrap_inner', '_bootstrap_inner'):\n self._can_create_dummy_thread = False\n elif basename == 'pydev_monkey' and co_name == '__call__':\n self._can_create_dummy_thread = False\n elif basename == 'pydevd' and co_name in ('run', 'main', '_exec'):\n self._can_create_dummy_thread = False\n elif basename == 'pydevd_tracing':\n self._can_create_dummy_thread = False\n else:\n self._can_create_dummy_thread = True\n\n # print('Can create dummy thread for thread started in: %s %s' % (basename, co_name))\n\n cdef initialize_if_possible(self):\n # Don't call threading.currentThread because if we're too early in the process\n # we may create a dummy thread.\n self.inside_frame_eval += 1\n\n try:\n thread_ident = _get_ident()\n t = _thread_active.get(thread_ident)\n if t is 
None:\n if self._can_create_dummy_thread:\n # Initialize the dummy thread and set the tracing (both are needed to\n # actually stop on breakpoints).\n t = threading.current_thread()\n SetTrace(dummy_trace_dispatch)\n else:\n return # Cannot initialize until thread becomes active.\n\n if getattr(t, 'is_pydev_daemon_thread', False):\n self.is_pydevd_thread = True\n self.fully_initialized = True\n else:\n try:\n additional_info = t.additional_info\n if additional_info is None:\n raise AttributeError()\n except:\n with _set_additional_thread_info_lock:\n # If it's not there, set it within a lock to avoid any racing\n # conditions.\n additional_info = getattr(thread, 'additional_info', None)\n if additional_info is None:\n additional_info = PyDBAdditionalThreadInfo()\n t.additional_info = additional_info\n self.additional_info = additional_info\n self.fully_initialized = True\n finally:\n self.inside_frame_eval -= 1\n\n\ncdef class FuncCodeInfo:\n\n cdef public str co_filename\n cdef public str co_name\n cdef public str canonical_normalized_filename\n cdef bint always_skip_code\n cdef public bint breakpoint_found\n cdef public object new_code\n\n # When breakpoints_mtime != PyDb.mtime the validity of breakpoints have\n # to be re-evaluated (if invalid a new FuncCodeInfo must be created and\n # tracing can't be disabled for the related frames).\n cdef public int breakpoints_mtime\n\n def __init__(self):\n self.co_filename = ''\n self.canonical_normalized_filename = ''\n self.always_skip_code = False\n\n # If breakpoints are found but new_code is None,\n # this means we weren't able to actually add the code\n # where needed, so, fallback to tracing.\n self.breakpoint_found = False\n self.new_code = None\n self.breakpoints_mtime = -1\n\n\ndef dummy_trace_dispatch(frame, str event, arg):\n if event == 'call':\n if frame.f_trace is not None:\n return frame.f_trace(frame, event, arg)\n return None\n\n\ndef get_thread_info_py() -> ThreadInfo:\n return get_thread_info(PyEval_GetFrame())\n\n\ncdef ThreadInfo get_thread_info(PyFrameObject * frame_obj):\n '''\n Provides thread-related info.\n\n May return None if the thread is still not active.\n '''\n cdef ThreadInfo thread_info\n try:\n # Note: changing to a `dict[thread.ident] = thread_info` had almost no\n # effect in the performance.\n thread_info = _thread_local_info.thread_info\n except:\n if frame_obj == NULL:\n return None\n thread_info = ThreadInfo()\n thread_info.initialize(frame_obj)\n thread_info.inside_frame_eval += 1\n try:\n _thread_local_info.thread_info = thread_info\n\n # Note: _code_extra_index is not actually thread-related,\n # but this is a good point to initialize it.\n global _code_extra_index\n if _code_extra_index == -1:\n _code_extra_index = <int> _PyEval_RequestCodeExtraIndex(release_co_extra)\n\n thread_info.initialize_if_possible()\n finally:\n thread_info.inside_frame_eval -= 1\n\n return thread_info\n\n\ndef decref_py(obj):\n '''\n Helper to be called from Python.\n '''\n Py_DECREF(obj)\n\n\ndef get_func_code_info_py(thread_info, frame, code_obj) -> FuncCodeInfo:\n '''\n Helper to be called from Python.\n '''\n return get_func_code_info(<ThreadInfo> thread_info, <PyFrameObject *> frame, <PyCodeObject *> code_obj)\n\n\ncdef int _code_extra_index = -1\n\ncdef FuncCodeInfo get_func_code_info(ThreadInfo thread_info, PyFrameObject * frame_obj, PyCodeObject * code_obj):\n '''\n Provides code-object related info.\n\n Stores the gathered info in a cache in the code object itself. 
Note that\n multiple threads can get the same info.\n\n get_thread_info() *must* be called at least once before get_func_code_info()\n to initialize _code_extra_index.\n\n '''\n # f_code = <object> code_obj\n # DEBUG = f_code.co_filename.endswith('_debugger_case_multiprocessing.py')\n # if DEBUG:\n # print('get_func_code_info', f_code.co_name, f_code.co_filename)\n\n cdef object main_debugger = GlobalDebuggerHolder.global_dbg\n thread_info.force_stay_in_untraced_mode = False # This is an output value of the function.\n\n cdef PyObject * extra\n _PyCode_GetExtra(<PyObject *> code_obj, _code_extra_index, & extra)\n if extra is not NULL:\n extra_obj = <PyObject *> extra\n if extra_obj is not NULL:\n func_code_info_obj = <FuncCodeInfo> extra_obj\n if func_code_info_obj.breakpoints_mtime == main_debugger.mtime:\n # if DEBUG:\n # print('get_func_code_info: matched mtime', f_code.co_name, f_code.co_filename)\n\n return func_code_info_obj\n\n cdef str co_filename = <str> code_obj.co_filename\n cdef str co_name = <str> code_obj.co_name\n cdef dict cache_file_type\n cdef tuple cache_file_type_key\n\n func_code_info = FuncCodeInfo()\n func_code_info.breakpoints_mtime = main_debugger.mtime\n\n func_code_info.co_filename = co_filename\n func_code_info.co_name = co_name\n\n if not func_code_info.always_skip_code:\n try:\n abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[co_filename]\n except:\n abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(<object>frame_obj)\n\n func_code_info.canonical_normalized_filename = abs_path_real_path_and_base[1]\n\n cache_file_type = main_debugger.get_cache_file_type()\n # Note: this cache key must be the same from PyDB.get_file_type() -- see it for comments\n # on the cache.\n cache_file_type_key = (frame_obj.f_code.co_firstlineno, abs_path_real_path_and_base[0], <object>frame_obj.f_code)\n try:\n file_type = cache_file_type[cache_file_type_key] # Make it faster\n except:\n file_type = main_debugger.get_file_type(<object>frame_obj, abs_path_real_path_and_base) # we don't want to debug anything related to pydevd\n\n if file_type is not None:\n func_code_info.always_skip_code = True\n\n if not func_code_info.always_skip_code:\n if main_debugger is not None:\n\n breakpoints: dict = main_debugger.breakpoints.get(func_code_info.canonical_normalized_filename)\n function_breakpoint: object = main_debugger.function_breakpoint_name_to_breakpoint.get(func_code_info.co_name)\n # print('\n---')\n # print(main_debugger.breakpoints)\n # print(func_code_info.canonical_normalized_filename)\n # print(main_debugger.breakpoints.get(func_code_info.canonical_normalized_filename))\n code_obj_py: object = <object> code_obj\n cached_code_obj_info: object = _cache.get(code_obj_py)\n if cached_code_obj_info:\n # The cache is for new code objects, so, in this case it's already\n # using the new code and we can't change it as this is a generator!\n # There's still a catch though: even though we don't replace the code,\n # we may not want to go into tracing mode (as would usually happen\n # when the new_code is None).\n func_code_info.new_code = None\n breakpoint_found, thread_info.force_stay_in_untraced_mode = \\n cached_code_obj_info.compute_force_stay_in_untraced_mode(breakpoints)\n func_code_info.breakpoint_found = breakpoint_found\n\n elif function_breakpoint:\n # Go directly into tracing mode\n func_code_info.breakpoint_found = True\n func_code_info.new_code = None\n \n elif breakpoints:\n # if DEBUG:\n # print('found breakpoints', code_obj_py.co_name, 
breakpoints)\n\n # Note: new_code can be None if unable to generate.\n # It should automatically put the new code object in the cache.\n breakpoint_found, func_code_info.new_code = generate_code_with_breakpoints(code_obj_py, breakpoints)\n func_code_info.breakpoint_found = breakpoint_found\n\n Py_INCREF(func_code_info)\n _PyCode_SetExtra(<PyObject *> code_obj, _code_extra_index, <PyObject *> func_code_info)\n\n return func_code_info\n\n\ncdef class _CodeLineInfo:\n\n cdef public dict line_to_offset\n cdef public int first_line\n cdef public int last_line\n\n def __init__(self, dict line_to_offset, int first_line, int last_line):\n self.line_to_offset = line_to_offset\n self.first_line = first_line\n self.last_line = last_line\n\n\n# Note: this method has a version in pure-python too.\ndef _get_code_line_info(code_obj):\n line_to_offset: dict = {}\n first_line: int = None\n last_line: int = None\n\n cdef int offset\n cdef int line\n\n for offset, line in dis.findlinestarts(code_obj):\n line_to_offset[line] = offset\n\n if line_to_offset:\n first_line = min(line_to_offset)\n last_line = max(line_to_offset)\n return _CodeLineInfo(line_to_offset, first_line, last_line)\n\n\n# Note: this is a cache where the key is the code objects we create ourselves so that\n# we always return the same code object for generators.\n# (so, we don't have a cache from the old code to the new info -- that's actually\n# handled by the cython side in `FuncCodeInfo get_func_code_info` by providing the\n# same code info if the debugger mtime is still the same).\n_cache: dict = {}\n\ndef get_cached_code_obj_info_py(code_obj_py):\n '''\n :return _CacheValue:\n :note: on cython use _cache.get(code_obj_py) directly.\n '''\n return _cache.get(code_obj_py)\n\n\ncdef class _CacheValue(object):\n\n cdef public object code_obj_py\n cdef public _CodeLineInfo code_line_info\n cdef public set breakpoints_hit_at_lines\n cdef public set code_lines_as_set\n\n def __init__(self, object code_obj_py, _CodeLineInfo code_line_info, set breakpoints_hit_at_lines):\n '''\n :param code_obj_py:\n :param _CodeLineInfo code_line_info:\n :param set[int] breakpoints_hit_at_lines:\n '''\n self.code_obj_py = code_obj_py\n self.code_line_info = code_line_info\n self.breakpoints_hit_at_lines = breakpoints_hit_at_lines\n self.code_lines_as_set = set(code_line_info.line_to_offset)\n\n cpdef compute_force_stay_in_untraced_mode(self, breakpoints):\n '''\n :param breakpoints:\n set(breakpoint_lines) or dict(breakpoint_line->breakpoint info)\n :return tuple(breakpoint_found, force_stay_in_untraced_mode)\n '''\n cdef bint force_stay_in_untraced_mode\n cdef bint breakpoint_found\n cdef set target_breakpoints\n\n force_stay_in_untraced_mode = False\n\n target_breakpoints = self.code_lines_as_set.intersection(breakpoints)\n breakpoint_found = bool(target_breakpoints)\n\n if not breakpoint_found:\n force_stay_in_untraced_mode = True\n else:\n force_stay_in_untraced_mode = self.breakpoints_hit_at_lines.issuperset(set(breakpoints))\n\n return breakpoint_found, force_stay_in_untraced_mode\n\ndef generate_code_with_breakpoints_py(object code_obj_py, dict breakpoints):\n return generate_code_with_breakpoints(code_obj_py, breakpoints)\n\n# DEBUG = True\n# debug_helper = DebugHelper()\n\ncdef generate_code_with_breakpoints(object code_obj_py, dict breakpoints):\n '''\n :param breakpoints:\n dict where the keys are the breakpoint lines.\n :return tuple(breakpoint_found, new_code)\n '''\n # The cache is needed for generator functions, because after each yield a new 
frame\n # is created but the former code object is used (so, check if code_to_modify is\n # already there and if not cache based on the new code generated).\n\n cdef bint success\n cdef int breakpoint_line\n cdef bint breakpoint_found\n cdef _CacheValue cache_value\n cdef set breakpoints_hit_at_lines\n cdef dict line_to_offset\n\n assert code_obj_py not in _cache, 'If a code object is cached, that same code object must be reused.'\n\n# if DEBUG:\n# initial_code_obj_py = code_obj_py\n\n code_line_info = _get_code_line_info(code_obj_py)\n\n success = True\n\n breakpoints_hit_at_lines = set()\n line_to_offset = code_line_info.line_to_offset\n\n for breakpoint_line in breakpoints:\n if breakpoint_line in line_to_offset:\n breakpoints_hit_at_lines.add(breakpoint_line)\n\n if breakpoints_hit_at_lines:\n success, new_code = insert_pydevd_breaks(\n code_obj_py,\n breakpoints_hit_at_lines,\n code_line_info\n )\n\n if not success:\n code_obj_py = None\n else:\n code_obj_py = new_code\n\n breakpoint_found = bool(breakpoints_hit_at_lines)\n if breakpoint_found and success:\n# if DEBUG:\n# op_number = debug_helper.write_dis(\n# 'inserting code, breaks at: %s' % (list(breakpoints),),\n# initial_code_obj_py\n# )\n#\n# debug_helper.write_dis(\n# 'after inserting code, breaks at: %s' % (list(breakpoints,)),\n# code_obj_py,\n# op_number=op_number,\n# )\n\n cache_value = _CacheValue(code_obj_py, code_line_info, breakpoints_hit_at_lines)\n _cache[code_obj_py] = cache_value\n\n return breakpoint_found, code_obj_py\n\nimport sys\n\ncdef bint IS_PY_39_OWNARDS = sys.version_info[:2] >= (3, 9)\n\ndef frame_eval_func():\n cdef PyThreadState *state = PyThreadState_Get()\n if IS_PY_39_OWNARDS:\n state.interp.eval_frame = <_PyFrameEvalFunction *> get_bytecode_while_frame_eval_39\n else:\n state.interp.eval_frame = <_PyFrameEvalFunction *> get_bytecode_while_frame_eval_38\n dummy_tracing_holder.set_trace_func(dummy_trace_dispatch)\n\n\ndef stop_frame_eval():\n cdef PyThreadState *state = PyThreadState_Get()\n state.interp.eval_frame = _PyEval_EvalFrameDefault\n\n# During the build we'll generate 2 versions of the code below so that we're compatible with\n# Python 3.9, which receives a "PyThreadState* tstate" as the first parameter and Python 3.6-3.8\n# which doesn't.\n### TEMPLATE_START\ncdef PyObject * get_bytecode_while_frame_eval(PyFrameObject * frame_obj, int exc):\n '''\n This function makes the actual evaluation and changes the bytecode to a version\n where programmatic breakpoints are added.\n '''\n if GlobalDebuggerHolder is None or _thread_local_info is None or exc:\n # Sometimes during process shutdown these global variables become None\n return CALL_EvalFrameDefault\n\n # co_filename: str = <str>frame_obj.f_code.co_filename\n # if co_filename.endswith('threading.py'):\n # return CALL_EvalFrameDefault\n\n cdef ThreadInfo thread_info\n cdef int STATE_SUSPEND = 2\n cdef int CMD_STEP_INTO = 107\n cdef int CMD_STEP_OVER = 108\n cdef int CMD_STEP_OVER_MY_CODE = 159\n cdef int CMD_STEP_INTO_MY_CODE = 144\n cdef int CMD_STEP_INTO_COROUTINE = 206\n cdef int CMD_SMART_STEP_INTO = 128\n cdef bint can_skip = True\n try:\n thread_info = _thread_local_info.thread_info\n except:\n thread_info = get_thread_info(frame_obj)\n if thread_info is None:\n return CALL_EvalFrameDefault\n\n if thread_info.inside_frame_eval:\n return CALL_EvalFrameDefault\n\n if not thread_info.fully_initialized:\n thread_info.initialize_if_possible()\n if not thread_info.fully_initialized:\n return CALL_EvalFrameDefault\n\n # Can only get 
additional_info when fully initialized.\n cdef PyDBAdditionalThreadInfo additional_info = thread_info.additional_info\n if thread_info.is_pydevd_thread or additional_info.is_tracing:\n # Make sure that we don't trace pydevd threads or inside our own calls.\n return CALL_EvalFrameDefault\n\n # frame = <object> frame_obj\n # DEBUG = frame.f_code.co_filename.endswith('_debugger_case_tracing.py')\n # if DEBUG:\n # print('get_bytecode_while_frame_eval', frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename)\n\n thread_info.inside_frame_eval += 1\n additional_info.is_tracing = True\n try:\n main_debugger: object = GlobalDebuggerHolder.global_dbg\n if main_debugger is None:\n return CALL_EvalFrameDefault\n frame = <object> frame_obj\n\n if thread_info.thread_trace_func is None:\n trace_func, apply_to_global = fix_top_level_trace_and_get_trace_func(main_debugger, frame)\n if apply_to_global:\n thread_info.thread_trace_func = trace_func\n\n if additional_info.pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_INTO_COROUTINE, CMD_SMART_STEP_INTO) or \\n main_debugger.break_on_caught_exceptions or \\n main_debugger.break_on_user_uncaught_exceptions or \\n main_debugger.has_plugin_exception_breaks or \\n main_debugger.signature_factory or \\n additional_info.pydev_step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE) and main_debugger.show_return_values and frame.f_back is additional_info.pydev_step_stop:\n\n # if DEBUG:\n # print('get_bytecode_while_frame_eval enabled trace')\n if thread_info.thread_trace_func is not None:\n frame.f_trace = thread_info.thread_trace_func\n else:\n frame.f_trace = <object> main_debugger.trace_dispatch\n else:\n func_code_info: FuncCodeInfo = get_func_code_info(thread_info, frame_obj, frame_obj.f_code)\n # if DEBUG:\n # print('get_bytecode_while_frame_eval always skip', func_code_info.always_skip_code)\n if not func_code_info.always_skip_code:\n\n if main_debugger.has_plugin_line_breaks or main_debugger.has_plugin_exception_breaks:\n can_skip = main_debugger.plugin.can_skip(main_debugger, <object> frame_obj)\n\n if not can_skip:\n # if DEBUG:\n # print('get_bytecode_while_frame_eval not can_skip')\n if thread_info.thread_trace_func is not None:\n frame.f_trace = thread_info.thread_trace_func\n else:\n frame.f_trace = <object> main_debugger.trace_dispatch\n\n if can_skip and func_code_info.breakpoint_found:\n # if DEBUG:\n # print('get_bytecode_while_frame_eval new_code', func_code_info.new_code)\n if not thread_info.force_stay_in_untraced_mode:\n # If breakpoints are found but new_code is None,\n # this means we weren't able to actually add the code\n # where needed, so, fallback to tracing.\n if func_code_info.new_code is None:\n if thread_info.thread_trace_func is not None:\n frame.f_trace = thread_info.thread_trace_func\n else:\n frame.f_trace = <object> main_debugger.trace_dispatch\n else:\n # print('Using frame eval break for', <object> frame_obj.f_code.co_name)\n update_globals_dict(<object> frame_obj.f_globals)\n Py_INCREF(func_code_info.new_code)\n old = <object> frame_obj.f_code\n frame_obj.f_code = <PyCodeObject *> func_code_info.new_code\n Py_DECREF(old)\n else:\n # When we're forcing to stay in traced mode we need to\n # update the globals dict (because this means that we're reusing\n # a previous code which had breakpoints added in a new frame).\n update_globals_dict(<object> frame_obj.f_globals)\n\n finally:\n thread_info.inside_frame_eval -= 1\n additional_info.is_tracing = False\n\n return CALL_EvalFrameDefault\n### TEMPLATE_END\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_frame_evaluator.template.pyx
pydevd_frame_evaluator.template.pyx
Other
25,163
0.95
0.177814
0.21371
vue-tools
461
2023-12-05T07:56:44.675436
Apache-2.0
false
01a726d5934cbf2069db1e9c08fcb290
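The _CacheValue.compute_force_stay_in_untraced_mode logic in the template above decides whether a frame that reuses an already-instrumented code object (for instance a generator resuming) can stay in untraced mode. Below is a minimal pure-Python sketch of the same set arithmetic; the function name and the example breakpoint line are illustrative, not part of pydevd's API.

import dis

def compute_force_stay_in_untraced_mode(code_obj, breakpoints_hit_at_lines, breakpoints):
    # Lines that actually exist in this code object (same data as _CodeLineInfo.line_to_offset).
    code_lines = {line for _offset, line in dis.findlinestarts(code_obj) if line is not None}

    # A breakpoint only matters if it falls on a real line of this code object.
    breakpoint_found = bool(code_lines & set(breakpoints))

    if not breakpoint_found:
        # Nothing to hit here: no need to trace this frame.
        force_stay_in_untraced_mode = True
    else:
        # Stay untraced only if every requested breakpoint was already injected
        # into the instrumented code object.
        force_stay_in_untraced_mode = breakpoints_hit_at_lines.issuperset(set(breakpoints))

    return breakpoint_found, force_stay_in_untraced_mode

def example():
    return 42  # pretend a breakpoint targets this line

found, stay_untraced = compute_force_stay_in_untraced_mode(
    example.__code__,
    breakpoints_hit_at_lines=set(),
    breakpoints={example.__code__.co_firstlineno + 1},
)
print(found, stay_untraced)  # True False: this frame must fall back to tracing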
try:\n try:\n from _pydevd_frame_eval_ext import pydevd_frame_evaluator as mod\n except ImportError:\n from _pydevd_frame_eval import pydevd_frame_evaluator as mod\n\nexcept ImportError:\n try:\n import sys\n\n try:\n is_64bits = sys.maxsize > 2**32\n except:\n # In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.\n raise ImportError\n plat = "32"\n if is_64bits:\n plat = "64"\n\n # We also accept things as:\n #\n # _pydevd_frame_eval.pydevd_frame_evaluator_win32_27_32\n # _pydevd_frame_eval.pydevd_frame_evaluator_win32_34_64\n #\n # to have multiple pre-compiled pyds distributed along the IDE\n # (generated by build_tools/build_binaries_windows.py).\n\n mod_name = "pydevd_frame_evaluator_%s_%s%s_%s" % (sys.platform, sys.version_info[0], sys.version_info[1], plat)\n check_name = "_pydevd_frame_eval.%s" % (mod_name,)\n mod = __import__(check_name)\n mod = getattr(mod, mod_name)\n except ImportError:\n raise\n\nframe_eval_func = mod.frame_eval_func\n\nstop_frame_eval = mod.stop_frame_eval\n\ndummy_trace_dispatch = mod.dummy_trace_dispatch\n\nget_thread_info_py = mod.get_thread_info_py\n\nclear_thread_local_info = mod.clear_thread_local_info\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_frame_eval_cython_wrapper.py
pydevd_frame_eval_cython_wrapper.py
Python
1,384
0.95
0.139535
0.235294
python-kit
952
2024-03-28T12:52:46.634494
GPL-3.0
false
b4e6c9ae615a5433d4b0f6cf5347d77a
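The wrapper above falls back to pre-compiled extension modules whose names encode platform, interpreter version and bitness. A rough sketch of that naming-and-import pattern follows; it assumes nothing about which binaries are actually shipped, so in a generic environment the import simply fails and the caller falls back to pure Python.

import importlib
import sys

def import_precompiled(package, base_name):
    # Builds a name such as _pydevd_frame_eval.pydevd_frame_evaluator_linux_310_64.
    plat = "64" if sys.maxsize > 2**32 else "32"
    mod_name = "%s_%s_%s%s_%s" % (
        base_name, sys.platform, sys.version_info[0], sys.version_info[1], plat
    )
    # importlib.import_module returns the leaf module directly, so the
    # __import__ + getattr dance from the original is not needed here.
    return importlib.import_module("%s.%s" % (package, mod_name))

try:
    mod = import_precompiled("_pydevd_frame_eval", "pydevd_frame_evaluator")
except ImportError:
    mod = None  # no accelerated module available; use the pure-Python code path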
import os\n\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_bundle.pydevd_trace_dispatch import USING_CYTHON\nfrom _pydevd_bundle.pydevd_constants import (\n USE_CYTHON_FLAG,\n ENV_FALSE_LOWER_VALUES,\n ENV_TRUE_LOWER_VALUES,\n IS_PY36_OR_GREATER,\n IS_PY38_OR_GREATER,\n SUPPORT_GEVENT,\n IS_PYTHON_STACKLESS,\n PYDEVD_USE_FRAME_EVAL,\n PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING,\n IS_PY311_OR_GREATER,\n)\n\nframe_eval_func = None\nstop_frame_eval = None\ndummy_trace_dispatch = None\nclear_thread_local_info = None\n\n# "NO" means we should not use frame evaluation, 'YES' we should use it (and fail if not there) and unspecified uses if possible.\nif (\n PYDEVD_USE_FRAME_EVAL in ENV_FALSE_LOWER_VALUES\n or USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES\n or not USING_CYTHON\n or\n # Frame eval mode does not work with ipython compatible debugging (this happens because the\n # way that frame eval works is run untraced and set tracing only for the frames with\n # breakpoints, but ipython compatible debugging creates separate frames for what's logically\n # the same frame).\n PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING\n):\n USING_FRAME_EVAL = False\n\nelif SUPPORT_GEVENT or (IS_PYTHON_STACKLESS and not IS_PY38_OR_GREATER):\n USING_FRAME_EVAL = False\n # i.e gevent and frame eval mode don't get along very well.\n # https://github.com/microsoft/debugpy/issues/189\n # Same problem with Stackless.\n # https://github.com/stackless-dev/stackless/issues/240\n\nelif PYDEVD_USE_FRAME_EVAL in ENV_TRUE_LOWER_VALUES and not IS_PY311_OR_GREATER:\n # Python 3.11 onwards doesn't have frame eval mode implemented\n # Fail if unable to use\n from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import (\n frame_eval_func,\n stop_frame_eval,\n dummy_trace_dispatch,\n clear_thread_local_info,\n )\n\n USING_FRAME_EVAL = True\n\nelse:\n USING_FRAME_EVAL = False\n # Try to use if possible\n if IS_PY36_OR_GREATER and not IS_PY311_OR_GREATER:\n # Python 3.11 onwards doesn't have frame eval mode implemented\n try:\n from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import (\n frame_eval_func,\n stop_frame_eval,\n dummy_trace_dispatch,\n clear_thread_local_info,\n )\n\n USING_FRAME_EVAL = True\n except ImportError:\n pydev_log.show_compile_cython_command_line()\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_frame_eval_main.py
pydevd_frame_eval_main.py
Python
2,515
0.95
0.126761
0.206349
python-kit
86
2023-11-28T23:30:39.793068
GPL-3.0
false
307e387b79c973560fa13c2dcd4a9bae
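pydevd_frame_eval_main.py above treats PYDEVD_USE_FRAME_EVAL as a tri-state switch: explicitly off, explicitly on (fail loudly if the extension is missing), or unset (best effort). A generic sketch of that pattern, with hypothetical value sets and helper names standing in for the pydevd constants:

import os

TRUE_VALUES = {"true", "yes", "1"}
FALSE_VALUES = {"false", "no", "0"}

def load_optional_accelerator(env_var, importer):
    # importer() returns the accelerated implementation or raises ImportError.
    value = os.environ.get(env_var, "").strip().lower()
    if value in FALSE_VALUES:
        return None              # explicitly disabled
    if value in TRUE_VALUES:
        return importer()        # explicitly required: let ImportError propagate
    try:
        return importer()        # unset: use it if it is available
    except ImportError:
        return None

def _import_frame_eval():
    raise ImportError("compiled extension not available in this sketch")

USING_FRAME_EVAL = load_optional_accelerator("PYDEVD_USE_FRAME_EVAL", _import_frame_eval) is not None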
import sys\n\nfrom _pydev_bundle import pydev_log\nfrom _pydev_bundle._pydev_saved_modules import threading\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\nfrom pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER\nfrom _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info\n\n\nclass DummyTracingHolder:\n dummy_trace_func = None\n\n def set_trace_func(self, trace_func):\n self.dummy_trace_func = trace_func\n\n\ndummy_tracing_holder = DummyTracingHolder()\n\n\ndef update_globals_dict(globals_dict):\n new_globals = {"_pydev_stop_at_break": _pydev_stop_at_break}\n globals_dict.update(new_globals)\n\n\ndef _get_line_for_frame(frame):\n # it's absolutely necessary to reset tracing function for frame in order to get the real line number\n tracing_func = frame.f_trace\n frame.f_trace = None\n line = frame.f_lineno\n frame.f_trace = tracing_func\n return line\n\n\ndef _pydev_stop_at_break(line):\n frame = sys._getframe(1)\n # print('pydevd SET TRACING at ', line, 'curr line', frame.f_lineno)\n t = threading.current_thread()\n try:\n additional_info = t.additional_info\n except:\n additional_info = set_additional_thread_info(t)\n\n if additional_info.is_tracing:\n return\n\n additional_info.is_tracing += 1\n try:\n py_db = get_global_debugger()\n if py_db is None:\n return\n\n pydev_log.debug("Setting f_trace due to frame eval mode in file: %s on line %s", frame.f_code.co_filename, line)\n additional_info.trace_suspend_type = "frame_eval"\n\n pydevd_frame_eval_cython_wrapper = sys.modules["_pydevd_frame_eval.pydevd_frame_eval_cython_wrapper"]\n thread_info = pydevd_frame_eval_cython_wrapper.get_thread_info_py()\n if thread_info.thread_trace_func is not None:\n frame.f_trace = thread_info.thread_trace_func\n else:\n frame.f_trace = py_db.get_thread_local_trace_func()\n finally:\n additional_info.is_tracing -= 1\n\n\ndef _pydev_needs_stop_at_break(line):\n """\n We separate the functionality into 2 functions so that we can generate a bytecode which\n generates a spurious line change so that we can do:\n\n if _pydev_needs_stop_at_break():\n # Set line to line -1\n _pydev_stop_at_break()\n # then, proceed to go to the current line\n # (which will then trigger a line event).\n """\n t = threading.current_thread()\n try:\n additional_info = t.additional_info\n except:\n additional_info = set_additional_thread_info(t)\n\n if additional_info.is_tracing:\n return False\n\n additional_info.is_tracing += 1\n try:\n frame = sys._getframe(1)\n # print('pydev needs stop at break?', line, 'curr line', frame.f_lineno, 'curr trace', frame.f_trace)\n if frame.f_trace is not None:\n # i.e.: this frame is already being traced, thus, we don't need to use programmatic breakpoints.\n return False\n\n py_db = get_global_debugger()\n if py_db is None:\n return False\n\n try:\n abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]\n except:\n abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)\n canonical_normalized_filename = abs_path_real_path_and_base[1]\n\n try:\n python_breakpoint = py_db.breakpoints[canonical_normalized_filename][line]\n except:\n # print("Couldn't find breakpoint in the file %s on line %s" % (frame.f_code.co_filename, line))\n # Could be KeyError if line is not there or TypeError if breakpoints_for_file is None.\n # Note: using catch-all exception for performance reasons (if the user adds a breakpoint\n # and then removes it after hitting it once, this method 
added for the programmatic\n # breakpoint will keep on being called and one of those exceptions will always be raised\n # here).\n return False\n\n if python_breakpoint:\n # print('YES')\n return True\n\n finally:\n additional_info.is_tracing -= 1\n\n return False\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_frame_tracing.py
pydevd_frame_tracing.py
Python
4,339
0.95
0.231405
0.148936
vue-tools
774
2025-06-01T08:32:53.027588
BSD-3-Clause
false
8ef32c018437e59a66f70c3c0c6deeec
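_pydev_stop_at_break above works by assigning frame.f_trace, and a per-frame trace function is only invoked while the interpreter's tracing machinery is active (which is why the frame evaluator keeps a dummy trace function registered). A simplified, self-contained illustration of that relationship, using plain sys.settrace rather than pydevd's dispatch:

import sys

def local_trace(frame, event, arg):
    # Receives 'line' events for the one frame it is attached to; roughly the
    # role of the thread trace function that _pydev_stop_at_break installs.
    if event == "line":
        print("line event at line", frame.f_lineno)
    return local_trace

def global_trace(frame, event, arg):
    # The cheap global hook: without something registered through sys.settrace,
    # a frame's f_trace is never called.
    frame.f_trace = local_trace
    return local_trace

def target():
    a = 1
    b = a + 1
    return b

sys.settrace(global_trace)
try:
    target()
finally:
    sys.settrace(None)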
from collections import namedtuple\nimport dis\nfrom functools import partial\nimport itertools\nimport os.path\nimport sys\n\nfrom _pydevd_frame_eval.vendored import bytecode\nfrom _pydevd_frame_eval.vendored.bytecode.instr import Instr, Label\nfrom _pydev_bundle import pydev_log\nfrom _pydevd_frame_eval.pydevd_frame_tracing import _pydev_stop_at_break, _pydev_needs_stop_at_break\n\nDEBUG = False\n\n\nclass DebugHelper(object):\n def __init__(self):\n self._debug_dir = os.path.join(os.path.dirname(__file__), "debug_info")\n try:\n os.makedirs(self._debug_dir)\n except:\n pass\n self._next = partial(next, itertools.count(0))\n\n def _get_filename(self, op_number=None, prefix=""):\n if op_number is None:\n op_number = self._next()\n name = "%03d_before.txt" % op_number\n else:\n name = "%03d_change.txt" % op_number\n\n filename = os.path.join(self._debug_dir, prefix + name)\n return filename, op_number\n\n def write_bytecode(self, b, op_number=None, prefix=""):\n filename, op_number = self._get_filename(op_number, prefix)\n with open(filename, "w") as stream:\n bytecode.dump_bytecode(b, stream=stream, lineno=True)\n return op_number\n\n def write_dis(self, code_to_modify, op_number=None, prefix=""):\n filename, op_number = self._get_filename(op_number, prefix)\n with open(filename, "w") as stream:\n stream.write("-------- ")\n stream.write("-------- ")\n stream.write("id(code_to_modify): %s" % id(code_to_modify))\n stream.write("\n\n")\n dis.dis(code_to_modify, file=stream)\n return op_number\n\n\n_CodeLineInfo = namedtuple("_CodeLineInfo", "line_to_offset, first_line, last_line")\n\n\n# Note: this method has a version in cython too (that one is usually used, this is just for tests).\ndef _get_code_line_info(code_obj):\n line_to_offset = {}\n first_line = None\n last_line = None\n\n for offset, line in dis.findlinestarts(code_obj):\n if line is not None:\n line_to_offset[line] = offset\n\n if line_to_offset:\n first_line = min(line_to_offset)\n last_line = max(line_to_offset)\n return _CodeLineInfo(line_to_offset, first_line, last_line)\n\n\nif DEBUG:\n debug_helper = DebugHelper()\n\n\ndef get_instructions_to_add(stop_at_line, _pydev_stop_at_break=_pydev_stop_at_break, _pydev_needs_stop_at_break=_pydev_needs_stop_at_break):\n """\n This is the bytecode for something as:\n\n if _pydev_needs_stop_at_break():\n _pydev_stop_at_break()\n\n but with some special handling for lines.\n """\n # Good reference to how things work regarding line numbers and jumps:\n # https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt\n\n # Usually use a stop line -1, but if that'd be 0, using line +1 is ok too.\n spurious_line = stop_at_line - 1\n if spurious_line <= 0:\n spurious_line = stop_at_line + 1\n\n label = Label()\n return [\n # -- if _pydev_needs_stop_at_break():\n Instr("LOAD_CONST", _pydev_needs_stop_at_break, lineno=stop_at_line),\n Instr("LOAD_CONST", stop_at_line, lineno=stop_at_line),\n Instr("CALL_FUNCTION", 1, lineno=stop_at_line),\n Instr("POP_JUMP_IF_FALSE", label, lineno=stop_at_line),\n # -- _pydev_stop_at_break()\n #\n # Note that this has line numbers -1 so that when the NOP just below\n # is executed we have a spurious line event.\n Instr("LOAD_CONST", _pydev_stop_at_break, lineno=spurious_line),\n Instr("LOAD_CONST", stop_at_line, lineno=spurious_line),\n Instr("CALL_FUNCTION", 1, lineno=spurious_line),\n Instr("POP_TOP", lineno=spurious_line),\n # Reason for the NOP: Python will give us a 'line' trace event whenever we forward jump to\n # the first instruction of a line, 
so, in the case where we haven't added a programmatic\n # breakpoint (either because we didn't hit a breakpoint anymore or because it was already\n # tracing), we don't want the spurious line event due to the line change, so, we make a jump\n # to the instruction right after the NOP so that the spurious line event is NOT generated in\n # this case (otherwise we'd have a line event even if the line didn't change).\n Instr("NOP", lineno=stop_at_line),\n label,\n ]\n\n\nclass _Node(object):\n def __init__(self, data):\n self.prev = None\n self.next = None\n self.data = data\n\n def append(self, data):\n node = _Node(data)\n\n curr_next = self.next\n\n node.next = self.next\n node.prev = self\n self.next = node\n\n if curr_next is not None:\n curr_next.prev = node\n\n return node\n\n def prepend(self, data):\n node = _Node(data)\n\n curr_prev = self.prev\n\n node.prev = self.prev\n node.next = self\n self.prev = node\n\n if curr_prev is not None:\n curr_prev.next = node\n\n return node\n\n\nclass _HelperBytecodeList(object):\n """\n A helper double-linked list to make the manipulation a bit easier (so that we don't need\n to keep track of indices that change) and performant (because adding multiple items to\n the middle of a regular list isn't ideal).\n """\n\n def __init__(self, lst=None):\n self._head = None\n self._tail = None\n if lst:\n node = self\n for item in lst:\n node = node.append(item)\n\n def append(self, data):\n if self._tail is None:\n node = _Node(data)\n self._head = self._tail = node\n return node\n else:\n node = self._tail = self.tail.append(data)\n return node\n\n @property\n def head(self):\n node = self._head\n # Manipulating the node directly may make it unsynchronized.\n while node.prev:\n self._head = node = node.prev\n return node\n\n @property\n def tail(self):\n node = self._tail\n # Manipulating the node directly may make it unsynchronized.\n while node.next:\n self._tail = node = node.next\n return node\n\n def __iter__(self):\n node = self.head\n\n while node:\n yield node.data\n node = node.next\n\n\n_PREDICT_TABLE = {\n "LIST_APPEND": ("JUMP_ABSOLUTE",),\n "SET_ADD": ("JUMP_ABSOLUTE",),\n "GET_ANEXT": ("LOAD_CONST",),\n "GET_AWAITABLE": ("LOAD_CONST",),\n "DICT_MERGE": ("CALL_FUNCTION_EX",),\n "MAP_ADD": ("JUMP_ABSOLUTE",),\n "COMPARE_OP": (\n "POP_JUMP_IF_FALSE",\n "POP_JUMP_IF_TRUE",\n ),\n "IS_OP": (\n "POP_JUMP_IF_FALSE",\n "POP_JUMP_IF_TRUE",\n ),\n "CONTAINS_OP": (\n "POP_JUMP_IF_FALSE",\n "POP_JUMP_IF_TRUE",\n ),\n # Note: there are some others with PREDICT on ceval, but they have more logic\n # and it needs more experimentation to know how it behaves in the static generated\n # code (and it's only an issue for us if there's actually a line change between\n # those, so, we don't have to really handle all the cases, only the one where\n # the line number actually changes from one instruction to the predicted one).\n}\n\n# 3.10 optimizations include copying code branches multiple times (for instance\n# if the body of a finally has a single assign statement it can copy the assign to the case\n# where an exception happens and doesn't happen for optimization purposes) and as such\n# we need to add the programmatic breakpoint multiple times.\nTRACK_MULTIPLE_BRANCHES = sys.version_info[:2] >= (3, 10)\n\n# When tracking multiple branches, we try to fix the bytecodes which would be PREDICTED in the\n# Python eval loop so that we don't have spurious line events that wouldn't usually be issued\n# in the tracing as they're ignored due to the eval prediction (even 
though they're in the bytecode).\nFIX_PREDICT = sys.version_info[:2] >= (3, 10)\n\n\ndef insert_pydevd_breaks(\n code_to_modify,\n breakpoint_lines,\n code_line_info=None,\n _pydev_stop_at_break=_pydev_stop_at_break,\n _pydev_needs_stop_at_break=_pydev_needs_stop_at_break,\n):\n """\n Inserts pydevd programmatic breaks into the code (at the given lines).\n\n :param breakpoint_lines: set with the lines where we should add breakpoints.\n :return: tuple(boolean flag whether insertion was successful, modified code).\n """\n if code_line_info is None:\n code_line_info = _get_code_line_info(code_to_modify)\n\n if not code_line_info.line_to_offset:\n return False, code_to_modify\n\n # Create a copy (and make sure we're dealing with a set).\n breakpoint_lines = set(breakpoint_lines)\n\n # Note that we can even generate breakpoints on the first line of code\n # now, since we generate a spurious line event -- it may be a bit pointless\n # as we'll stop in the first line and we don't currently stop the tracing after the\n # user resumes, but in the future, if we do that, this would be a nice\n # improvement.\n # if code_to_modify.co_firstlineno in breakpoint_lines:\n # return False, code_to_modify\n\n for line in breakpoint_lines:\n if line <= 0:\n # The first line is line 1, so, a break at line 0 is not valid.\n pydev_log.info("Trying to add breakpoint in invalid line: %s", line)\n return False, code_to_modify\n\n try:\n b = bytecode.Bytecode.from_code(code_to_modify)\n\n if DEBUG:\n op_number_bytecode = debug_helper.write_bytecode(b, prefix="bytecode.")\n\n helper_list = _HelperBytecodeList(b)\n\n modified_breakpoint_lines = breakpoint_lines.copy()\n\n curr_node = helper_list.head\n added_breaks_in_lines = set()\n last_lineno = None\n while curr_node is not None:\n instruction = curr_node.data\n instruction_lineno = getattr(instruction, "lineno", None)\n curr_name = getattr(instruction, "name", None)\n\n if FIX_PREDICT:\n predict_targets = _PREDICT_TABLE.get(curr_name)\n if predict_targets:\n # Odd case: the next instruction may have a line number but it doesn't really\n # appear in the tracing due to the PREDICT() in ceval, so, fix the bytecode so\n # that it does things the way that ceval actually interprets it.\n # See: https://mail.python.org/archives/list/python-dev@python.org/thread/CP2PTFCMTK57KM3M3DLJNWGO66R5RVPB/\n next_instruction = curr_node.next.data\n next_name = getattr(next_instruction, "name", None)\n if next_name in predict_targets:\n next_instruction_lineno = getattr(next_instruction, "lineno", None)\n if next_instruction_lineno:\n next_instruction.lineno = None\n\n if instruction_lineno is not None:\n if TRACK_MULTIPLE_BRANCHES:\n if last_lineno is None:\n last_lineno = instruction_lineno\n else:\n if last_lineno == instruction_lineno:\n # If the previous is a label, someone may jump into it, so, we need to add\n # the break even if it's in the same line.\n if curr_node.prev.data.__class__ != Label:\n # Skip adding this as the line is still the same.\n curr_node = curr_node.next\n continue\n last_lineno = instruction_lineno\n else:\n if instruction_lineno in added_breaks_in_lines:\n curr_node = curr_node.next\n continue\n\n if instruction_lineno in modified_breakpoint_lines:\n added_breaks_in_lines.add(instruction_lineno)\n if curr_node.prev is not None and curr_node.prev.data.__class__ == Label and curr_name == "POP_TOP":\n # If we have a SETUP_FINALLY where the target is a POP_TOP, we can't change\n # the target to be the breakpoint instruction (this can crash the 
interpreter).\n\n for new_instruction in get_instructions_to_add(\n instruction_lineno,\n _pydev_stop_at_break=_pydev_stop_at_break,\n _pydev_needs_stop_at_break=_pydev_needs_stop_at_break,\n ):\n curr_node = curr_node.append(new_instruction)\n\n else:\n for new_instruction in get_instructions_to_add(\n instruction_lineno,\n _pydev_stop_at_break=_pydev_stop_at_break,\n _pydev_needs_stop_at_break=_pydev_needs_stop_at_break,\n ):\n curr_node.prepend(new_instruction)\n\n curr_node = curr_node.next\n\n b[:] = helper_list\n\n if DEBUG:\n debug_helper.write_bytecode(b, op_number_bytecode, prefix="bytecode.")\n\n new_code = b.to_code()\n\n except:\n pydev_log.exception("Error inserting pydevd breaks.")\n return False, code_to_modify\n\n if DEBUG:\n op_number = debug_helper.write_dis(code_to_modify)\n debug_helper.write_dis(new_code, op_number)\n\n return True, new_code\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\pydevd_modify_bytecode.py
pydevd_modify_bytecode.py
Python
13,908
0.95
0.201102
0.16041
python-kit
538
2024-01-28T19:38:25.261898
Apache-2.0
false
9623d61f3f290793d6de8e1f12ee6883
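insert_pydevd_breaks above walks the instruction stream through _HelperBytecodeList so that the injected call sequence can be spliced in without re-indexing a flat list on every insertion. A stripped-down, self-contained sketch of that prepend idea, using plain strings instead of Instr objects and not the pydevd classes themselves:

class Node:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None

    def prepend(self, data):
        # Splice a new node immediately before this one.
        node = Node(data)
        node.prev, node.next = self.prev, self
        if self.prev is not None:
            self.prev.next = node
        self.prev = node
        return node

def to_linked(items):
    head = prev = None
    for item in items:
        node = Node(item)
        node.prev = prev
        if prev is None:
            head = node
        else:
            prev.next = node
        prev = node
    return head

def as_list(head):
    out = []
    while head is not None:
        out.append(head.data)
        head = head.next
    return out

head = to_linked(["LOAD_A", "CALL", "RETURN"])
node = head.next  # pretend "CALL" is the first instruction of the breakpoint line
for injected in ["LOAD_needs_stop", "CALL_1", "POP_JUMP_IF_FALSE"]:
    node.prepend(injected)  # mirrors curr_node.prepend(new_instruction) above
print(as_list(head))
# ['LOAD_A', 'LOAD_needs_stop', 'CALL_1', 'POP_JUMP_IF_FALSE', 'CALL', 'RETURN']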
#include "Python.h"\n\nvoid release_co_extra(void *obj) {\n Py_XDECREF(obj);\n}\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\release_mem.h
release_mem.h
C
84
0.6
0
0.25
vue-tools
774
2024-01-24T17:57:17.476856
GPL-3.0
false
43e20e042879bb5a773fc7d9da67d0d2
def _fix_contents(filename, contents):\n import re\n\n contents = re.sub(r"from bytecode", r"from _pydevd_frame_eval.vendored.bytecode", contents, flags=re.MULTILINE)\n\n contents = re.sub(r"import bytecode", r"from _pydevd_frame_eval.vendored import bytecode", contents, flags=re.MULTILINE)\n\n # This test will import the wrong setup (we're not interested in it).\n contents = re.sub(r"def test_version\(self\):", r"def skip_test_version(self):", contents, flags=re.MULTILINE)\n\n if filename.startswith("test_"):\n if "pytestmark" not in contents:\n pytest_mark = """\nimport pytest\nfrom tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON\nfrom tests_python.debug_constants import TEST_CYTHON\npytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')\n"""\n contents = pytest_mark + contents\n return contents\n\n\ndef main():\n import os\n\n # traverse root directory, and list directories as dirs and files as files\n for root, dirs, files in os.walk(os.path.dirname(__file__)):\n path = root.split(os.sep)\n for filename in files:\n if filename.endswith(".py") and filename != "pydevd_fix_code.py":\n with open(os.path.join(root, filename), "r") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents(filename, contents)\n if contents != new_contents:\n print("fixed ", os.path.join(root, filename))\n with open(os.path.join(root, filename), "w") as stream:\n stream.write(new_contents)\n\n\n# print(len(path) * '---', filename)\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\pydevd_fix_code.py
pydevd_fix_code.py
Python
1,806
0.95
0.244444
0.090909
python-kit
962
2024-03-02T17:54:59.619220
MIT
false
7215b3d3e73d85855765a58fa7b665f5
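_fix_contents above rewrites the vendored library's absolute imports so they resolve under _pydevd_frame_eval.vendored. A tiny standalone illustration of those re.sub rewrites applied to an in-memory string (the sample source text is made up):

import re

VENDOR_PREFIX = "_pydevd_frame_eval.vendored"

def rewrite_imports(source):
    source = re.sub(r"from bytecode", "from %s.bytecode" % VENDOR_PREFIX, source, flags=re.MULTILINE)
    source = re.sub(r"import bytecode", "from %s import bytecode" % VENDOR_PREFIX, source, flags=re.MULTILINE)
    return source

sample = "import bytecode\nfrom bytecode.instr import Instr\n"
print(rewrite_imports(sample))
# from _pydevd_frame_eval.vendored import bytecode
# from _pydevd_frame_eval.vendored.bytecode.instr import Instr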
This folder contains vendored dependencies of the debugger.\n\nRight now this means the 'bytecode' library (MIT license).\n\nTo update the version, remove the bytecode* contents from this folder and then use:\n\npip install bytecode --target .\n\nor from the main branch (if needed for some early bugfix):\n\npython -m pip install https://github.com/MatthieuDartiailh/bytecode/archive/main.zip --target .\n\nThen run 'pydevd_fix_code.py' to fix the imports in the vendored files, run their tests (to see\nif things are still ok) and commit.\n\nThen, to finish, apply the patch that adds the offset to the instructions (bcb8a28669e9178f96f5d71af7259e0674acc47c).\n\nNote: commit the egg-info as a note of the license (force if needed).

.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\README.txt
README.txt
Other
717
0.95
0.235294
0
node-utils
924
2024-02-15T00:26:29.954870
GPL-3.0
false
6edc399c422f01f71a57ed0da978ad63
# alias to keep the 'bytecode' variable free\nimport sys\nfrom _pydevd_frame_eval.vendored import bytecode as _bytecode\nfrom _pydevd_frame_eval.vendored.bytecode.instr import UNSET, Label, SetLineno, Instr\nfrom _pydevd_frame_eval.vendored.bytecode.flags import infer_flags\n\n\nclass BaseBytecode:\n def __init__(self):\n self.argcount = 0\n if sys.version_info > (3, 8):\n self.posonlyargcount = 0\n self.kwonlyargcount = 0\n self.first_lineno = 1\n self.name = "<module>"\n self.filename = "<string>"\n self.docstring = UNSET\n self.cellvars = []\n # we cannot recreate freevars from instructions because of super()\n # special-case\n self.freevars = []\n self._flags = _bytecode.CompilerFlags(0)\n\n def _copy_attr_from(self, bytecode):\n self.argcount = bytecode.argcount\n if sys.version_info > (3, 8):\n self.posonlyargcount = bytecode.posonlyargcount\n self.kwonlyargcount = bytecode.kwonlyargcount\n self.flags = bytecode.flags\n self.first_lineno = bytecode.first_lineno\n self.name = bytecode.name\n self.filename = bytecode.filename\n self.docstring = bytecode.docstring\n self.cellvars = list(bytecode.cellvars)\n self.freevars = list(bytecode.freevars)\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n\n if self.argcount != other.argcount:\n return False\n if sys.version_info > (3, 8):\n if self.posonlyargcount != other.posonlyargcount:\n return False\n if self.kwonlyargcount != other.kwonlyargcount:\n return False\n if self.flags != other.flags:\n return False\n if self.first_lineno != other.first_lineno:\n return False\n if self.filename != other.filename:\n return False\n if self.name != other.name:\n return False\n if self.docstring != other.docstring:\n return False\n if self.cellvars != other.cellvars:\n return False\n if self.freevars != other.freevars:\n return False\n if self.compute_stacksize() != other.compute_stacksize():\n return False\n\n return True\n\n @property\n def flags(self):\n return self._flags\n\n @flags.setter\n def flags(self, value):\n if not isinstance(value, _bytecode.CompilerFlags):\n value = _bytecode.CompilerFlags(value)\n self._flags = value\n\n def update_flags(self, *, is_async=None):\n self.flags = infer_flags(self, is_async)\n\n\nclass _BaseBytecodeList(BaseBytecode, list):\n """List subclass providing type stable slicing and copying."""\n\n def __getitem__(self, index):\n value = super().__getitem__(index)\n if isinstance(index, slice):\n value = type(self)(value)\n value._copy_attr_from(self)\n\n return value\n\n def copy(self):\n new = type(self)(super().copy())\n new._copy_attr_from(self)\n return new\n\n def legalize(self):\n """Check that all the element of the list are valid and remove SetLineno."""\n lineno_pos = []\n set_lineno = None\n current_lineno = self.first_lineno\n\n for pos, instr in enumerate(self):\n if isinstance(instr, SetLineno):\n set_lineno = instr.lineno\n lineno_pos.append(pos)\n continue\n # Filter out Labels\n if not isinstance(instr, Instr):\n continue\n if set_lineno is not None:\n instr.lineno = set_lineno\n elif instr.lineno is None:\n instr.lineno = current_lineno\n else:\n current_lineno = instr.lineno\n\n for i in reversed(lineno_pos):\n del self[i]\n\n def __iter__(self):\n instructions = super().__iter__()\n for instr in instructions:\n self._check_instr(instr)\n yield instr\n\n def _check_instr(self, instr):\n raise NotImplementedError()\n\n\nclass _InstrList(list):\n def _flat(self):\n instructions = []\n labels = {}\n jumps = []\n\n offset = 0\n for index, instr in enumerate(self):\n if 
isinstance(instr, Label):\n instructions.append("label_instr%s" % index)\n labels[instr] = offset\n else:\n if isinstance(instr, Instr) and isinstance(instr.arg, Label):\n target_label = instr.arg\n instr = _bytecode.ConcreteInstr(instr.name, 0, lineno=instr.lineno)\n jumps.append((target_label, instr))\n instructions.append(instr)\n offset += 1\n\n for target_label, instr in jumps:\n instr.arg = labels[target_label]\n\n return instructions\n\n def __eq__(self, other):\n if not isinstance(other, _InstrList):\n other = _InstrList(other)\n\n return self._flat() == other._flat()\n\n\nclass Bytecode(_InstrList, _BaseBytecodeList):\n def __init__(self, instructions=()):\n BaseBytecode.__init__(self)\n self.argnames = []\n for instr in instructions:\n self._check_instr(instr)\n self.extend(instructions)\n\n def __iter__(self):\n instructions = super().__iter__()\n for instr in instructions:\n self._check_instr(instr)\n yield instr\n\n def _check_instr(self, instr):\n if not isinstance(instr, (Label, SetLineno, Instr)):\n raise ValueError(\n "Bytecode must only contain Label, " "SetLineno, and Instr objects, " "but %s was found" % type(instr).__name__\n )\n\n def _copy_attr_from(self, bytecode):\n super()._copy_attr_from(bytecode)\n if isinstance(bytecode, Bytecode):\n self.argnames = bytecode.argnames\n\n @staticmethod\n def from_code(code):\n if sys.version_info[:2] >= (3, 11):\n raise RuntimeError("This is not updated for Python 3.11 onwards, use only up to Python 3.10!!")\n concrete = _bytecode.ConcreteBytecode.from_code(code)\n return concrete.to_bytecode()\n\n def compute_stacksize(self, *, check_pre_and_post=True):\n cfg = _bytecode.ControlFlowGraph.from_bytecode(self)\n return cfg.compute_stacksize(check_pre_and_post=check_pre_and_post)\n\n def to_code(self, compute_jumps_passes=None, stacksize=None, *, check_pre_and_post=True):\n # Prevent reconverting the concrete bytecode to bytecode and cfg to do the\n # calculation if we need to do it.\n if stacksize is None:\n stacksize = self.compute_stacksize(check_pre_and_post=check_pre_and_post)\n bc = self.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)\n return bc.to_code(stacksize=stacksize)\n\n def to_concrete_bytecode(self, compute_jumps_passes=None):\n converter = _bytecode._ConvertBytecodeToConcrete(self)\n return converter.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\bytecode.py
bytecode.py
Python
7,297
0.95
0.293269
0.034682
awesome-app
448
2023-11-15T06:01:59.215512
MIT
false
3ccd0b0685e6f26e91aa7b54feaa32a7
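Bytecode.from_code and to_code above give a round trip from a live code object to abstract instructions and back. A minimal usage sketch, assuming the vendored package is importable exactly as in the imports above and that the interpreter is 3.10 or older (from_code deliberately refuses to run on 3.11+):

# Illustrative only; requires the vendored package on sys.path and Python <= 3.10.
from _pydevd_frame_eval.vendored.bytecode import Bytecode

def add(a, b):
    return a + b

b = Bytecode.from_code(add.__code__)       # abstract Instr/Label/SetLineno items
print([getattr(i, "name", i) for i in b])  # instruction names (labels print as objects)
add.__code__ = b.to_code()                 # reassemble; stack size is recomputed
print(add(2, 3))                           # still 5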
import dis\nimport inspect\nimport opcode as _opcode\nimport struct\nimport sys\nimport types\n\n# alias to keep the 'bytecode' variable free\nfrom _pydevd_frame_eval.vendored import bytecode as _bytecode\nfrom _pydevd_frame_eval.vendored.bytecode.instr import (\n UNSET,\n Instr,\n Label,\n SetLineno,\n FreeVar,\n CellVar,\n Compare,\n const_key,\n _check_arg_int,\n)\n\n# - jumps use instruction\n# - lineno use bytes (dis.findlinestarts(code))\n# - dis displays bytes\nOFFSET_AS_INSTRUCTION = sys.version_info >= (3, 10)\n\n\ndef _set_docstring(code, consts):\n if not consts:\n return\n first_const = consts[0]\n if isinstance(first_const, str) or first_const is None:\n code.docstring = first_const\n\n\nclass ConcreteInstr(Instr):\n """Concrete instruction.\n\n arg must be an integer in the range 0..2147483647.\n\n It has a read-only size attribute.\n """\n\n __slots__ = ("_size", "_extended_args", "offset")\n\n def __init__(self, name, arg=UNSET, *, lineno=None, extended_args=None, offset=None):\n # Allow to remember a potentially meaningless EXTENDED_ARG emitted by\n # Python to properly compute the size and avoid messing up the jump\n # targets\n self._extended_args = extended_args\n self._set(name, arg, lineno)\n self.offset = offset\n\n def _check_arg(self, name, opcode, arg):\n if opcode >= _opcode.HAVE_ARGUMENT:\n if arg is UNSET:\n raise ValueError("operation %s requires an argument" % name)\n\n _check_arg_int(name, arg)\n else:\n if arg is not UNSET:\n raise ValueError("operation %s has no argument" % name)\n\n def _set(self, name, arg, lineno):\n super()._set(name, arg, lineno)\n size = 2\n if arg is not UNSET:\n while arg > 0xFF:\n size += 2\n arg >>= 8\n if self._extended_args is not None:\n size = 2 + 2 * self._extended_args\n self._size = size\n\n @property\n def size(self):\n return self._size\n\n def _cmp_key(self, labels=None):\n return (self._lineno, self._name, self._arg)\n\n def get_jump_target(self, instr_offset):\n if self._opcode in _opcode.hasjrel:\n s = (self._size // 2) if OFFSET_AS_INSTRUCTION else self._size\n return instr_offset + s + self._arg\n if self._opcode in _opcode.hasjabs:\n return self._arg\n return None\n\n def assemble(self):\n if self._arg is UNSET:\n return bytes((self._opcode, 0))\n\n arg = self._arg\n b = [self._opcode, arg & 0xFF]\n while arg > 0xFF:\n arg >>= 8\n b[:0] = [_opcode.EXTENDED_ARG, arg & 0xFF]\n\n if self._extended_args:\n while len(b) < self._size:\n b[:0] = [_opcode.EXTENDED_ARG, 0x00]\n\n return bytes(b)\n\n @classmethod\n def disassemble(cls, lineno, code, offset):\n index = 2 * offset if OFFSET_AS_INSTRUCTION else offset\n op = code[index]\n if op >= _opcode.HAVE_ARGUMENT:\n arg = code[index + 1]\n else:\n arg = UNSET\n name = _opcode.opname[op]\n # fabioz: added offset to ConcreteBytecode\n # Need to keep an eye on https://github.com/MatthieuDartiailh/bytecode/issues/48 in\n # case the library decides to add this in some other way.\n return cls(name, arg, lineno=lineno, offset=index)\n\n\nclass ConcreteBytecode(_bytecode._BaseBytecodeList):\n def __init__(self, instructions=(), *, consts=(), names=(), varnames=()):\n super().__init__()\n self.consts = list(consts)\n self.names = list(names)\n self.varnames = list(varnames)\n for instr in instructions:\n self._check_instr(instr)\n self.extend(instructions)\n\n def __iter__(self):\n instructions = super().__iter__()\n for instr in instructions:\n self._check_instr(instr)\n yield instr\n\n def _check_instr(self, instr):\n if not isinstance(instr, (ConcreteInstr, SetLineno)):\n 
raise ValueError(\n "ConcreteBytecode must only contain " "ConcreteInstr and SetLineno objects, " "but %s was found" % type(instr).__name__\n )\n\n def _copy_attr_from(self, bytecode):\n super()._copy_attr_from(bytecode)\n if isinstance(bytecode, ConcreteBytecode):\n self.consts = bytecode.consts\n self.names = bytecode.names\n self.varnames = bytecode.varnames\n\n def __repr__(self):\n return "<ConcreteBytecode instr#=%s>" % len(self)\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n\n const_keys1 = list(map(const_key, self.consts))\n const_keys2 = list(map(const_key, other.consts))\n if const_keys1 != const_keys2:\n return False\n\n if self.names != other.names:\n return False\n if self.varnames != other.varnames:\n return False\n\n return super().__eq__(other)\n\n @staticmethod\n def from_code(code, *, extended_arg=False):\n line_starts = dict(entry for entry in dis.findlinestarts(code) if entry[1] is not None)\n\n # find block starts\n instructions = []\n offset = 0\n lineno = code.co_firstlineno\n while offset < (len(code.co_code) // (2 if OFFSET_AS_INSTRUCTION else 1)):\n lineno_off = (2 * offset) if OFFSET_AS_INSTRUCTION else offset\n if lineno_off in line_starts:\n lineno = line_starts[lineno_off]\n\n instr = ConcreteInstr.disassemble(lineno, code.co_code, offset)\n\n instructions.append(instr)\n offset += (instr.size // 2) if OFFSET_AS_INSTRUCTION else instr.size\n\n bytecode = ConcreteBytecode()\n\n # replace jump targets with blocks\n # HINT : in some cases Python generate useless EXTENDED_ARG opcode\n # with a value of zero. Such opcodes do not increases the size of the\n # following opcode the way a normal EXTENDED_ARG does. As a\n # consequence, they need to be tracked manually as otherwise the\n # offsets in jump targets can end up being wrong.\n if not extended_arg:\n # The list is modified in place\n bytecode._remove_extended_args(instructions)\n\n bytecode.name = code.co_name\n bytecode.filename = code.co_filename\n bytecode.flags = code.co_flags\n bytecode.argcount = code.co_argcount\n if sys.version_info >= (3, 8):\n bytecode.posonlyargcount = code.co_posonlyargcount\n bytecode.kwonlyargcount = code.co_kwonlyargcount\n bytecode.first_lineno = code.co_firstlineno\n bytecode.names = list(code.co_names)\n bytecode.consts = list(code.co_consts)\n bytecode.varnames = list(code.co_varnames)\n bytecode.freevars = list(code.co_freevars)\n bytecode.cellvars = list(code.co_cellvars)\n _set_docstring(bytecode, code.co_consts)\n\n bytecode[:] = instructions\n return bytecode\n\n @staticmethod\n def _normalize_lineno(instructions, first_lineno):\n lineno = first_lineno\n for instr in instructions:\n # if instr.lineno is not set, it's inherited from the previous\n # instruction, or from self.first_lineno\n if instr.lineno is not None:\n lineno = instr.lineno\n\n if isinstance(instr, ConcreteInstr):\n yield (lineno, instr)\n\n def _assemble_code(self):\n offset = 0\n code_str = []\n linenos = []\n for lineno, instr in self._normalize_lineno(self, self.first_lineno):\n code_str.append(instr.assemble())\n i_size = instr.size\n linenos.append(((offset * 2) if OFFSET_AS_INSTRUCTION else offset, i_size, lineno))\n offset += (i_size // 2) if OFFSET_AS_INSTRUCTION else i_size\n code_str = b"".join(code_str)\n return (code_str, linenos)\n\n @staticmethod\n def _assemble_lnotab(first_lineno, linenos):\n lnotab = []\n old_offset = 0\n old_lineno = first_lineno\n for offset, _, lineno in linenos:\n dlineno = lineno - old_lineno\n if dlineno == 0:\n continue\n # FIXME: 
be kind, force monotonic line numbers? add an option?\n if dlineno < 0 and sys.version_info < (3, 6):\n raise ValueError("negative line number delta is not supported " "on Python < 3.6")\n old_lineno = lineno\n\n doff = offset - old_offset\n old_offset = offset\n\n while doff > 255:\n lnotab.append(b"\xff\x00")\n doff -= 255\n\n while dlineno < -128:\n lnotab.append(struct.pack("Bb", doff, -128))\n doff = 0\n dlineno -= -128\n\n while dlineno > 127:\n lnotab.append(struct.pack("Bb", doff, 127))\n doff = 0\n dlineno -= 127\n\n assert 0 <= doff <= 255\n assert -128 <= dlineno <= 127\n\n lnotab.append(struct.pack("Bb", doff, dlineno))\n\n return b"".join(lnotab)\n\n @staticmethod\n def _pack_linetable(doff, dlineno, linetable):\n while dlineno < -127:\n linetable.append(struct.pack("Bb", 0, -127))\n dlineno -= -127\n\n while dlineno > 127:\n linetable.append(struct.pack("Bb", 0, 127))\n dlineno -= 127\n\n if doff > 254:\n linetable.append(struct.pack("Bb", 254, dlineno))\n doff -= 254\n\n while doff > 254:\n linetable.append(b"\xfe\x00")\n doff -= 254\n linetable.append(struct.pack("Bb", doff, 0))\n\n else:\n linetable.append(struct.pack("Bb", doff, dlineno))\n\n assert 0 <= doff <= 254\n assert -127 <= dlineno <= 127\n\n def _assemble_linestable(self, first_lineno, linenos):\n if not linenos:\n return b""\n\n linetable = []\n old_offset = 0\n\n iter_in = iter(linenos)\n\n offset, i_size, old_lineno = next(iter_in)\n old_dlineno = old_lineno - first_lineno\n for offset, i_size, lineno in iter_in:\n dlineno = lineno - old_lineno\n if dlineno == 0:\n continue\n old_lineno = lineno\n\n doff = offset - old_offset\n old_offset = offset\n\n self._pack_linetable(doff, old_dlineno, linetable)\n old_dlineno = dlineno\n\n # Pack the line of the last instruction.\n doff = offset + i_size - old_offset\n self._pack_linetable(doff, old_dlineno, linetable)\n\n return b"".join(linetable)\n\n @staticmethod\n def _remove_extended_args(instructions):\n # replace jump targets with blocks\n # HINT : in some cases Python generate useless EXTENDED_ARG opcode\n # with a value of zero. Such opcodes do not increases the size of the\n # following opcode the way a normal EXTENDED_ARG does. 
As a\n # consequence, they need to be tracked manually as otherwise the\n # offsets in jump targets can end up being wrong.\n nb_extended_args = 0\n extended_arg = None\n index = 0\n while index < len(instructions):\n instr = instructions[index]\n\n # Skip SetLineno meta instruction\n if isinstance(instr, SetLineno):\n index += 1\n continue\n\n if instr.name == "EXTENDED_ARG":\n nb_extended_args += 1\n if extended_arg is not None:\n extended_arg = (extended_arg << 8) + instr.arg\n else:\n extended_arg = instr.arg\n\n del instructions[index]\n continue\n\n if extended_arg is not None:\n arg = (extended_arg << 8) + instr.arg\n extended_arg = None\n\n instr = ConcreteInstr(\n instr.name,\n arg,\n lineno=instr.lineno,\n extended_args=nb_extended_args,\n offset=instr.offset,\n )\n instructions[index] = instr\n nb_extended_args = 0\n\n index += 1\n\n if extended_arg is not None:\n raise ValueError("EXTENDED_ARG at the end of the code")\n\n def compute_stacksize(self, *, check_pre_and_post=True):\n bytecode = self.to_bytecode()\n cfg = _bytecode.ControlFlowGraph.from_bytecode(bytecode)\n return cfg.compute_stacksize(check_pre_and_post=check_pre_and_post)\n\n def to_code(self, stacksize=None, *, check_pre_and_post=True):\n code_str, linenos = self._assemble_code()\n lnotab = (\n self._assemble_linestable(self.first_lineno, linenos)\n if sys.version_info >= (3, 10)\n else self._assemble_lnotab(self.first_lineno, linenos)\n )\n nlocals = len(self.varnames)\n if stacksize is None:\n stacksize = self.compute_stacksize(check_pre_and_post=check_pre_and_post)\n\n if sys.version_info < (3, 8):\n return types.CodeType(\n self.argcount,\n self.kwonlyargcount,\n nlocals,\n stacksize,\n int(self.flags),\n code_str,\n tuple(self.consts),\n tuple(self.names),\n tuple(self.varnames),\n self.filename,\n self.name,\n self.first_lineno,\n lnotab,\n tuple(self.freevars),\n tuple(self.cellvars),\n )\n else:\n return types.CodeType(\n self.argcount,\n self.posonlyargcount,\n self.kwonlyargcount,\n nlocals,\n stacksize,\n int(self.flags),\n code_str,\n tuple(self.consts),\n tuple(self.names),\n tuple(self.varnames),\n self.filename,\n self.name,\n self.first_lineno,\n lnotab,\n tuple(self.freevars),\n tuple(self.cellvars),\n )\n\n def to_bytecode(self):\n # Copy instruction and remove extended args if any (in-place)\n c_instructions = self[:]\n self._remove_extended_args(c_instructions)\n\n # find jump targets\n jump_targets = set()\n offset = 0\n for instr in c_instructions:\n if isinstance(instr, SetLineno):\n continue\n target = instr.get_jump_target(offset)\n if target is not None:\n jump_targets.add(target)\n offset += (instr.size // 2) if OFFSET_AS_INSTRUCTION else instr.size\n\n # create labels\n jumps = []\n instructions = []\n labels = {}\n offset = 0\n ncells = len(self.cellvars)\n\n for lineno, instr in self._normalize_lineno(c_instructions, self.first_lineno):\n if offset in jump_targets:\n label = Label()\n labels[offset] = label\n instructions.append(label)\n\n jump_target = instr.get_jump_target(offset)\n size = instr.size\n\n arg = instr.arg\n # FIXME: better error reporting\n if instr.opcode in _opcode.hasconst:\n arg = self.consts[arg]\n elif instr.opcode in _opcode.haslocal:\n arg = self.varnames[arg]\n elif instr.opcode in _opcode.hasname:\n arg = self.names[arg]\n elif instr.opcode in _opcode.hasfree:\n if arg < ncells:\n name = self.cellvars[arg]\n arg = CellVar(name)\n else:\n name = self.freevars[arg - ncells]\n arg = FreeVar(name)\n elif instr.opcode in _opcode.hascompare:\n arg = 
Compare(arg)\n\n if jump_target is None:\n instr = Instr(instr.name, arg, lineno=lineno, offset=instr.offset)\n else:\n instr_index = len(instructions)\n instructions.append(instr)\n offset += (size // 2) if OFFSET_AS_INSTRUCTION else size\n\n if jump_target is not None:\n jumps.append((instr_index, jump_target))\n\n # replace jump targets with labels\n for index, jump_target in jumps:\n instr = instructions[index]\n # FIXME: better error reporting on missing label\n label = labels[jump_target]\n instructions[index] = Instr(instr.name, label, lineno=instr.lineno, offset=instr.offset)\n\n bytecode = _bytecode.Bytecode()\n bytecode._copy_attr_from(self)\n\n nargs = bytecode.argcount + bytecode.kwonlyargcount\n if sys.version_info > (3, 8):\n nargs += bytecode.posonlyargcount\n if bytecode.flags & inspect.CO_VARARGS:\n nargs += 1\n if bytecode.flags & inspect.CO_VARKEYWORDS:\n nargs += 1\n bytecode.argnames = self.varnames[:nargs]\n _set_docstring(bytecode, self.consts)\n\n bytecode.extend(instructions)\n return bytecode\n\n\nclass _ConvertBytecodeToConcrete:\n # Default number of passes of compute_jumps() before giving up. Refer to\n # assemble_jump_offsets() in compile.c for background.\n _compute_jumps_passes = 10\n\n def __init__(self, code):\n assert isinstance(code, _bytecode.Bytecode)\n self.bytecode = code\n\n # temporary variables\n self.instructions = []\n self.jumps = []\n self.labels = {}\n\n # used to build ConcreteBytecode() object\n self.consts_indices = {}\n self.consts_list = []\n self.names = []\n self.varnames = []\n\n def add_const(self, value):\n key = const_key(value)\n if key in self.consts_indices:\n return self.consts_indices[key]\n index = len(self.consts_indices)\n self.consts_indices[key] = index\n self.consts_list.append(value)\n return index\n\n @staticmethod\n def add(names, name):\n try:\n index = names.index(name)\n except ValueError:\n index = len(names)\n names.append(name)\n return index\n\n def concrete_instructions(self):\n ncells = len(self.bytecode.cellvars)\n lineno = self.bytecode.first_lineno\n\n for instr in self.bytecode:\n if isinstance(instr, Label):\n self.labels[instr] = len(self.instructions)\n continue\n\n if isinstance(instr, SetLineno):\n lineno = instr.lineno\n continue\n\n if isinstance(instr, ConcreteInstr):\n instr = instr.copy()\n else:\n assert isinstance(instr, Instr)\n\n if instr.lineno is not None:\n lineno = instr.lineno\n\n arg = instr.arg\n is_jump = isinstance(arg, Label)\n if is_jump:\n label = arg\n # fake value, real value is set in compute_jumps()\n arg = 0\n elif instr.opcode in _opcode.hasconst:\n arg = self.add_const(arg)\n elif instr.opcode in _opcode.haslocal:\n arg = self.add(self.varnames, arg)\n elif instr.opcode in _opcode.hasname:\n arg = self.add(self.names, arg)\n elif instr.opcode in _opcode.hasfree:\n if isinstance(arg, CellVar):\n arg = self.bytecode.cellvars.index(arg.name)\n else:\n assert isinstance(arg, FreeVar)\n arg = ncells + self.bytecode.freevars.index(arg.name)\n elif instr.opcode in _opcode.hascompare:\n if isinstance(arg, Compare):\n arg = arg.value\n\n instr = ConcreteInstr(instr.name, arg, lineno=lineno)\n if is_jump:\n self.jumps.append((len(self.instructions), label, instr))\n\n self.instructions.append(instr)\n\n def compute_jumps(self):\n offsets = []\n offset = 0\n for index, instr in enumerate(self.instructions):\n offsets.append(offset)\n offset += instr.size // 2 if OFFSET_AS_INSTRUCTION else instr.size\n # needed if a label is at the end\n offsets.append(offset)\n\n # fix argument of 
jump instructions: resolve labels\n modified = False\n for index, label, instr in self.jumps:\n target_index = self.labels[label]\n target_offset = offsets[target_index]\n\n if instr.opcode in _opcode.hasjrel:\n instr_offset = offsets[index]\n target_offset -= instr_offset + (instr.size // 2 if OFFSET_AS_INSTRUCTION else instr.size)\n\n old_size = instr.size\n # FIXME: better error report if target_offset is negative\n instr.arg = target_offset\n if instr.size != old_size:\n modified = True\n\n return modified\n\n def to_concrete_bytecode(self, compute_jumps_passes=None):\n if compute_jumps_passes is None:\n compute_jumps_passes = self._compute_jumps_passes\n\n first_const = self.bytecode.docstring\n if first_const is not UNSET:\n self.add_const(first_const)\n\n self.varnames.extend(self.bytecode.argnames)\n\n self.concrete_instructions()\n for pas in range(0, compute_jumps_passes):\n modified = self.compute_jumps()\n if not modified:\n break\n else:\n raise RuntimeError("compute_jumps() failed to converge after" " %d passes" % (pas + 1))\n\n concrete = ConcreteBytecode(\n self.instructions,\n consts=self.consts_list.copy(),\n names=self.names,\n varnames=self.varnames,\n )\n concrete._copy_attr_from(self.bytecode)\n return concrete\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\concrete.py
concrete.py
Python
22,811
0.95
0.208207
0.078755
vue-tools
130
2024-03-08T23:11:46.051327
GPL-3.0
false
05e2bf81466a876fe9d06e34209bc4db
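A minimal round-trip sketch for the conversion layer in concrete.py above, assuming the vendored package is importable under the path listed in the metadata; the helper function add is only illustrative. ConcreteBytecode.from_code() yields fixed opcodes with integer arguments, to_bytecode() translates jump targets into Label objects, and to_code() reassembles through _ConvertBytecodeToConcrete (including the iterative compute_jumps() passes).

import types
from _pydevd_frame_eval.vendored.bytecode import ConcreteBytecode

def add(a, b):
    return a + b

# Concrete form: real opcodes, integer arguments, explicit offsets.
concrete = ConcreteBytecode.from_code(add.__code__)
# Abstract form: consts/names are inlined and jump targets become Labels.
abstract = concrete.to_bytecode()
# Reassembly runs _ConvertBytecodeToConcrete.to_concrete_bytecode() under the hood.
rebuilt = abstract.to_code()
assert types.FunctionType(rebuilt, globals(), "add")(2, 3) == 5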
# alias to keep the 'bytecode' variable free\nimport sys\nfrom enum import IntFlag\nfrom _pydevd_frame_eval.vendored import bytecode as _bytecode\n\n\nclass CompilerFlags(IntFlag):\n """Possible values of the co_flags attribute of Code object.\n\n Note: We do not rely on inspect values here as some of them are missing and\n furthermore would be version dependent.\n\n """\n\n OPTIMIZED = 0x00001 # noqa\n NEWLOCALS = 0x00002 # noqa\n VARARGS = 0x00004 # noqa\n VARKEYWORDS = 0x00008 # noqa\n NESTED = 0x00010 # noqa\n GENERATOR = 0x00020 # noqa\n NOFREE = 0x00040 # noqa\n # New in Python 3.5\n # Used for coroutines defined using async def ie native coroutine\n COROUTINE = 0x00080 # noqa\n # Used for coroutines defined as a generator and then decorated using\n # types.coroutine\n ITERABLE_COROUTINE = 0x00100 # noqa\n # New in Python 3.6\n # Generator defined in an async def function\n ASYNC_GENERATOR = 0x00200 # noqa\n\n # __future__ flags\n # future flags changed in Python 3.9\n if sys.version_info < (3, 9):\n FUTURE_GENERATOR_STOP = 0x80000 # noqa\n if sys.version_info > (3, 6):\n FUTURE_ANNOTATIONS = 0x100000\n else:\n FUTURE_GENERATOR_STOP = 0x800000 # noqa\n FUTURE_ANNOTATIONS = 0x1000000\n\n\ndef infer_flags(bytecode, is_async=None):\n """Infer the proper flags for a bytecode based on the instructions.\n\n Because the bytecode does not have enough context to guess if a function\n is asynchronous the algorithm tries to be conservative and will never turn\n a previously async code into a sync one.\n\n Parameters\n ----------\n bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph\n Bytecode for which to infer the proper flags\n is_async : bool | None, optional\n Force the code to be marked as asynchronous if True, prevent it from\n being marked as asynchronous if False and simply infer the best\n solution based on the opcode and the existing flag if None.\n\n """\n flags = CompilerFlags(0)\n if not isinstance(\n bytecode,\n (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph),\n ):\n msg = "Expected a Bytecode, ConcreteBytecode or ControlFlowGraph " "instance not %s"\n raise ValueError(msg % bytecode)\n\n instructions = bytecode.get_instructions() if isinstance(bytecode, _bytecode.ControlFlowGraph) else bytecode\n instr_names = {i.name for i in instructions if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))}\n\n # Identify optimized code\n if not (instr_names & {"STORE_NAME", "LOAD_NAME", "DELETE_NAME"}):\n flags |= CompilerFlags.OPTIMIZED\n\n # Check for free variables\n if not (\n instr_names\n & {\n "LOAD_CLOSURE",\n "LOAD_DEREF",\n "STORE_DEREF",\n "DELETE_DEREF",\n "LOAD_CLASSDEREF",\n }\n ):\n flags |= CompilerFlags.NOFREE\n\n # Copy flags for which we cannot infer the right value\n flags |= bytecode.flags & (CompilerFlags.NEWLOCALS | CompilerFlags.VARARGS | CompilerFlags.VARKEYWORDS | CompilerFlags.NESTED)\n\n sure_generator = instr_names & {"YIELD_VALUE"}\n maybe_generator = instr_names & {"YIELD_VALUE", "YIELD_FROM"}\n\n sure_async = instr_names & {\n "GET_AWAITABLE",\n "GET_AITER",\n "GET_ANEXT",\n "BEFORE_ASYNC_WITH",\n "SETUP_ASYNC_WITH",\n "END_ASYNC_FOR",\n }\n\n # If performing inference or forcing an async behavior, first inspect\n # the flags since this is the only way to identify iterable coroutines\n if is_async in (None, True):\n if bytecode.flags & CompilerFlags.COROUTINE:\n if sure_generator:\n flags |= CompilerFlags.ASYNC_GENERATOR\n else:\n flags |= CompilerFlags.COROUTINE\n elif bytecode.flags & 
CompilerFlags.ITERABLE_COROUTINE:\n if sure_async:\n msg = (\n "The ITERABLE_COROUTINE flag is set but bytecode that"\n "can only be used in async functions have been "\n "detected. Please unset that flag before performing "\n "inference."\n )\n raise ValueError(msg)\n flags |= CompilerFlags.ITERABLE_COROUTINE\n elif bytecode.flags & CompilerFlags.ASYNC_GENERATOR:\n if not sure_generator:\n flags |= CompilerFlags.COROUTINE\n else:\n flags |= CompilerFlags.ASYNC_GENERATOR\n\n # If the code was not asynchronous before determine if it should now be\n # asynchronous based on the opcode and the is_async argument.\n else:\n if sure_async:\n # YIELD_FROM is not allowed in async generator\n if sure_generator:\n flags |= CompilerFlags.ASYNC_GENERATOR\n else:\n flags |= CompilerFlags.COROUTINE\n\n elif maybe_generator:\n if is_async:\n if sure_generator:\n flags |= CompilerFlags.ASYNC_GENERATOR\n else:\n flags |= CompilerFlags.COROUTINE\n else:\n flags |= CompilerFlags.GENERATOR\n\n elif is_async:\n flags |= CompilerFlags.COROUTINE\n\n # If the code should not be asynchronous, check first it is possible and\n # next set the GENERATOR flag if relevant\n else:\n if sure_async:\n raise ValueError(\n "The is_async argument is False but bytecodes " "that can only be used in async functions have " "been detected."\n )\n\n if maybe_generator:\n flags |= CompilerFlags.GENERATOR\n\n flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP\n\n return flags\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\flags.py
flags.py
Python
6,013
0.95
0.228395
0.138686
vue-tools
587
2025-06-11T09:05:31.424305
Apache-2.0
false
6aac17fb0aaf18f02481b64f10d72eab
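A short usage sketch for infer_flags() from flags.py, assuming the same vendored import path; the coroutine fetch is illustrative. Because the algorithm never demotes async code, the COROUTINE flag already present on the source code object is preserved when is_async is left as None.

from _pydevd_frame_eval.vendored.bytecode import Bytecode, CompilerFlags
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags

async def fetch():
    return 42

bc = Bytecode.from_code(fetch.__code__)
flags = infer_flags(bc)  # is_async=None: infer from opcodes plus the existing flags
assert flags & CompilerFlags.COROUTINE
assert flags & CompilerFlags.OPTIMIZED  # no STORE_NAME/LOAD_NAME/DELETE_NAME present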
import enum\nimport dis\nimport opcode as _opcode\nimport sys\nfrom marshal import dumps as _dumps\n\nfrom _pydevd_frame_eval.vendored import bytecode as _bytecode\n\n\n@enum.unique\nclass Compare(enum.IntEnum):\n LT = 0\n LE = 1\n EQ = 2\n NE = 3\n GT = 4\n GE = 5\n IN = 6\n NOT_IN = 7\n IS = 8\n IS_NOT = 9\n EXC_MATCH = 10\n\n\nUNSET = object()\n\n\ndef const_key(obj):\n try:\n return _dumps(obj)\n except ValueError:\n # For other types, we use the object identifier as an unique identifier\n # to ensure that they are seen as unequal.\n return (type(obj), id(obj))\n\n\ndef _pushes_back(opname):\n if opname in ["CALL_FINALLY"]:\n # CALL_FINALLY pushes the address of the "finally" block instead of a\n # value, hence we don't treat it as pushing back op\n return False\n return (\n opname.startswith("UNARY_")\n or opname.startswith("GET_")\n # BUILD_XXX_UNPACK have been removed in 3.9\n or opname.startswith("BINARY_")\n or opname.startswith("INPLACE_")\n or opname.startswith("BUILD_")\n or opname.startswith("CALL_")\n ) or opname in (\n "LIST_TO_TUPLE",\n "LIST_EXTEND",\n "SET_UPDATE",\n "DICT_UPDATE",\n "DICT_MERGE",\n "IS_OP",\n "CONTAINS_OP",\n "FORMAT_VALUE",\n "MAKE_FUNCTION",\n "IMPORT_NAME",\n # technically, these three do not push back, but leave the container\n # object on TOS\n "SET_ADD",\n "LIST_APPEND",\n "MAP_ADD",\n "LOAD_ATTR",\n )\n\n\ndef _check_lineno(lineno):\n if not isinstance(lineno, int):\n raise TypeError("lineno must be an int")\n if lineno < 1:\n raise ValueError("invalid lineno")\n\n\nclass SetLineno:\n __slots__ = ("_lineno",)\n\n def __init__(self, lineno):\n _check_lineno(lineno)\n self._lineno = lineno\n\n @property\n def lineno(self):\n return self._lineno\n\n def __eq__(self, other):\n if not isinstance(other, SetLineno):\n return False\n return self._lineno == other._lineno\n\n\nclass Label:\n __slots__ = ()\n\n\nclass _Variable:\n __slots__ = ("name",)\n\n def __init__(self, name):\n self.name = name\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n return self.name == other.name\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return "<%s %r>" % (self.__class__.__name__, self.name)\n\n\nclass CellVar(_Variable):\n __slots__ = ()\n\n\nclass FreeVar(_Variable):\n __slots__ = ()\n\n\ndef _check_arg_int(name, arg):\n if not isinstance(arg, int):\n raise TypeError("operation %s argument must be an int, " "got %s" % (name, type(arg).__name__))\n\n if not (0 <= arg <= 2147483647):\n raise ValueError("operation %s argument must be in " "the range 0..2,147,483,647" % name)\n\n\nif sys.version_info < (3, 8):\n _stack_effects = {\n # NOTE: the entries are all 2-tuples. 
Entry[0/False] is non-taken jumps.\n # Entry[1/True] is for taken jumps.\n # opcodes not in dis.stack_effect\n _opcode.opmap["EXTENDED_ARG"]: (0, 0),\n _opcode.opmap["NOP"]: (0, 0),\n # Jump taken/not-taken are different:\n _opcode.opmap["JUMP_IF_TRUE_OR_POP"]: (-1, 0),\n _opcode.opmap["JUMP_IF_FALSE_OR_POP"]: (-1, 0),\n _opcode.opmap["FOR_ITER"]: (1, -1),\n _opcode.opmap["SETUP_WITH"]: (1, 6),\n _opcode.opmap["SETUP_ASYNC_WITH"]: (0, 5),\n _opcode.opmap["SETUP_EXCEPT"]: (0, 6), # as of 3.7, below for <=3.6\n _opcode.opmap["SETUP_FINALLY"]: (0, 6), # as of 3.7, below for <=3.6\n }\n\n # More stack effect values that are unique to the version of Python.\n if sys.version_info < (3, 7):\n _stack_effects.update(\n {\n _opcode.opmap["SETUP_WITH"]: (7, 7),\n _opcode.opmap["SETUP_EXCEPT"]: (6, 9),\n _opcode.opmap["SETUP_FINALLY"]: (6, 9),\n }\n )\n\n\nclass Instr:\n """Abstract instruction."""\n\n __slots__ = ("_name", "_opcode", "_arg", "_lineno", "offset")\n\n def __init__(self, name, arg=UNSET, *, lineno=None, offset=None):\n self._set(name, arg, lineno)\n self.offset = offset\n\n def _check_arg(self, name, opcode, arg):\n if name == "EXTENDED_ARG":\n raise ValueError(\n "only concrete instruction can contain EXTENDED_ARG, " "highlevel instruction can represent arbitrary argument without it"\n )\n\n if opcode >= _opcode.HAVE_ARGUMENT:\n if arg is UNSET:\n raise ValueError("operation %s requires an argument" % name)\n else:\n if arg is not UNSET:\n raise ValueError("operation %s has no argument" % name)\n\n if self._has_jump(opcode):\n if not isinstance(arg, (Label, _bytecode.BasicBlock)):\n raise TypeError("operation %s argument type must be " "Label or BasicBlock, got %s" % (name, type(arg).__name__))\n\n elif opcode in _opcode.hasfree:\n if not isinstance(arg, (CellVar, FreeVar)):\n raise TypeError("operation %s argument must be CellVar " "or FreeVar, got %s" % (name, type(arg).__name__))\n\n elif opcode in _opcode.haslocal or opcode in _opcode.hasname:\n if not isinstance(arg, str):\n raise TypeError("operation %s argument must be a str, " "got %s" % (name, type(arg).__name__))\n\n elif opcode in _opcode.hasconst:\n if isinstance(arg, Label):\n raise ValueError("label argument cannot be used " "in %s operation" % name)\n if isinstance(arg, _bytecode.BasicBlock):\n raise ValueError("block argument cannot be used " "in %s operation" % name)\n\n elif opcode in _opcode.hascompare:\n if not isinstance(arg, Compare):\n raise TypeError("operation %s argument type must be " "Compare, got %s" % (name, type(arg).__name__))\n\n elif opcode >= _opcode.HAVE_ARGUMENT:\n _check_arg_int(name, arg)\n\n def _set(self, name, arg, lineno):\n if not isinstance(name, str):\n raise TypeError("operation name must be a str")\n try:\n opcode = _opcode.opmap[name]\n except KeyError:\n raise ValueError("invalid operation name")\n\n # check lineno\n if lineno is not None:\n _check_lineno(lineno)\n\n self._check_arg(name, opcode, arg)\n\n self._name = name\n self._opcode = opcode\n self._arg = arg\n self._lineno = lineno\n\n def set(self, name, arg=UNSET):\n """Modify the instruction in-place.\n\n Replace name and arg attributes. 
Don't modify lineno.\n """\n self._set(name, arg, self._lineno)\n\n def require_arg(self):\n """Does the instruction require an argument?"""\n return self._opcode >= _opcode.HAVE_ARGUMENT\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._set(name, self._arg, self._lineno)\n\n @property\n def opcode(self):\n return self._opcode\n\n @opcode.setter\n def opcode(self, op):\n if not isinstance(op, int):\n raise TypeError("operator code must be an int")\n if 0 <= op <= 255:\n name = _opcode.opname[op]\n valid = name != "<%r>" % op\n else:\n valid = False\n if not valid:\n raise ValueError("invalid operator code")\n\n self._set(name, self._arg, self._lineno)\n\n @property\n def arg(self):\n return self._arg\n\n @arg.setter\n def arg(self, arg):\n self._set(self._name, arg, self._lineno)\n\n @property\n def lineno(self):\n return self._lineno\n\n @lineno.setter\n def lineno(self, lineno):\n self._set(self._name, self._arg, lineno)\n\n def stack_effect(self, jump=None):\n if self._opcode < _opcode.HAVE_ARGUMENT:\n arg = None\n elif not isinstance(self._arg, int) or self._opcode in _opcode.hasconst:\n # Argument is either a non-integer or an integer constant,\n # not oparg.\n arg = 0\n else:\n arg = self._arg\n\n if sys.version_info < (3, 8):\n effect = _stack_effects.get(self._opcode, None)\n if effect is not None:\n return max(effect) if jump is None else effect[jump]\n return dis.stack_effect(self._opcode, arg)\n else:\n return dis.stack_effect(self._opcode, arg, jump=jump)\n\n def pre_and_post_stack_effect(self, jump=None):\n _effect = self.stack_effect(jump=jump)\n\n # To compute pre size and post size to avoid segfault cause by not enough\n # stack element\n _opname = _opcode.opname[self._opcode]\n if _opname.startswith("DUP_TOP"):\n return _effect * -1, _effect * 2\n if _pushes_back(_opname):\n # if the op pushes value back to the stack, then the stack effect given\n # by dis.stack_effect actually equals pre + post effect, therefore we need\n # -1 from the stack effect as a pre condition\n return _effect - 1, 1\n if _opname.startswith("UNPACK_"):\n # Instr(UNPACK_* , n) pops 1 and pushes n\n # _effect = n - 1\n # hence we return -1, _effect + 1\n return -1, _effect + 1\n if _opname == "FOR_ITER" and not jump:\n # Since FOR_ITER needs TOS to be an iterator, which basically means\n # a prerequisite of 1 on the stack\n return -1, 2\n if _opname == "ROT_N":\n return (-self._arg, self._arg)\n return {"ROT_TWO": (-2, 2), "ROT_THREE": (-3, 3), "ROT_FOUR": (-4, 4)}.get(_opname, (_effect, 0))\n\n def copy(self):\n return self.__class__(self._name, self._arg, lineno=self._lineno, offset=self.offset)\n\n def __repr__(self):\n if self._arg is not UNSET:\n return "<%s arg=%r lineno=%s>" % (self._name, self._arg, self._lineno)\n else:\n return "<%s lineno=%s>" % (self._name, self._lineno)\n\n def _cmp_key(self, labels=None):\n arg = self._arg\n if self._opcode in _opcode.hasconst:\n arg = const_key(arg)\n elif isinstance(arg, Label) and labels is not None:\n arg = labels[arg]\n return (self._lineno, self._name, arg)\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n return self._cmp_key() == other._cmp_key()\n\n @staticmethod\n def _has_jump(opcode):\n return opcode in _opcode.hasjrel or opcode in _opcode.hasjabs\n\n def has_jump(self):\n return self._has_jump(self._opcode)\n\n def is_cond_jump(self):\n """Is a conditional jump?"""\n # Ex: POP_JUMP_IF_TRUE, JUMP_IF_FALSE_OR_POP\n return "JUMP_IF_" in self._name\n\n def 
is_uncond_jump(self):\n """Is an unconditional jump?"""\n return self.name in {"JUMP_FORWARD", "JUMP_ABSOLUTE"}\n\n def is_final(self):\n if self._name in {\n "RETURN_VALUE",\n "RAISE_VARARGS",\n "RERAISE",\n "BREAK_LOOP",\n "CONTINUE_LOOP",\n }:\n return True\n if self.is_uncond_jump():\n return True\n return False\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\instr.py
instr.py
Python
11,691
0.95
0.233871
0.087542
awesome-app
187
2024-09-15T09:17:49.785108
BSD-3-Clause
false
f9777ec231ec63af2a88902127e25c72
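A hedged sketch of building code by hand with the Instr and Compare types defined above (same vendored import path assumed). Abstract instructions take symbolic arguments (constants, names, Compare members); Bytecode.to_code() resolves them to indices when assembling, and stack_effect() reports the net stack change of a single instruction.

from _pydevd_frame_eval.vendored.bytecode import Bytecode, Instr, Compare

# Assemble the equivalent of "x = 1 < 2" at module level.
code = Bytecode([
    Instr("LOAD_CONST", 1),
    Instr("LOAD_CONST", 2),
    Instr("COMPARE_OP", Compare.LT),
    Instr("STORE_NAME", "x"),
    Instr("LOAD_CONST", None),
    Instr("RETURN_VALUE"),
])
ns = {}
exec(code.to_code(), ns)
assert ns["x"] is True
assert Instr("LOAD_CONST", 1).stack_effect() == 1  # pushes one value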
"""\nPeephole optimizer of CPython 3.6 reimplemented in pure Python using\nthe bytecode module.\n"""\nimport opcode\nimport operator\nimport sys\nfrom _pydevd_frame_eval.vendored.bytecode import Instr, Bytecode, ControlFlowGraph, BasicBlock, Compare\n\nJUMPS_ON_TRUE = frozenset(\n (\n "POP_JUMP_IF_TRUE",\n "JUMP_IF_TRUE_OR_POP",\n )\n)\n\nNOT_COMPARE = {\n Compare.IN: Compare.NOT_IN,\n Compare.NOT_IN: Compare.IN,\n Compare.IS: Compare.IS_NOT,\n Compare.IS_NOT: Compare.IS,\n}\n\nMAX_SIZE = 20\n\n\nclass ExitUnchanged(Exception):\n """Exception used to skip the peephole optimizer"""\n\n pass\n\n\nclass PeepholeOptimizer:\n """Python reimplementation of the peephole optimizer.\n\n Copy of the C comment:\n\n Perform basic peephole optimizations to components of a code object.\n The consts object should still be in list form to allow new constants\n to be appended.\n\n To keep the optimizer simple, it bails out (does nothing) for code that\n has a length over 32,700, and does not calculate extended arguments.\n That allows us to avoid overflow and sign issues. Likewise, it bails when\n the lineno table has complex encoding for gaps >= 255. EXTENDED_ARG can\n appear before MAKE_FUNCTION; in this case both opcodes are skipped.\n EXTENDED_ARG preceding any other opcode causes the optimizer to bail.\n\n Optimizations are restricted to simple transformations occuring within a\n single basic block. All transformations keep the code size the same or\n smaller. For those that reduce size, the gaps are initially filled with\n NOPs. Later those NOPs are removed and the jump addresses retargeted in\n a single pass. Code offset is adjusted accordingly.\n """\n\n def __init__(self):\n # bytecode.ControlFlowGraph instance\n self.code = None\n self.const_stack = None\n self.block_index = None\n self.block = None\n # index of the current instruction in self.block instructions\n self.index = None\n # whether we are in a LOAD_CONST sequence\n self.in_consts = False\n\n def check_result(self, value):\n try:\n size = len(value)\n except TypeError:\n return True\n return size <= MAX_SIZE\n\n def replace_load_const(self, nconst, instr, result):\n # FIXME: remove temporary computed constants?\n # FIXME: or at least reuse existing constants?\n\n self.in_consts = True\n\n load_const = Instr("LOAD_CONST", result, lineno=instr.lineno)\n start = self.index - nconst - 1\n self.block[start : self.index] = (load_const,)\n self.index -= nconst\n\n if nconst:\n del self.const_stack[-nconst:]\n self.const_stack.append(result)\n self.in_consts = True\n\n def eval_LOAD_CONST(self, instr):\n self.in_consts = True\n value = instr.arg\n self.const_stack.append(value)\n self.in_consts = True\n\n def unaryop(self, op, instr):\n try:\n value = self.const_stack[-1]\n result = op(value)\n except IndexError:\n return\n\n if not self.check_result(result):\n return\n\n self.replace_load_const(1, instr, result)\n\n def eval_UNARY_POSITIVE(self, instr):\n return self.unaryop(operator.pos, instr)\n\n def eval_UNARY_NEGATIVE(self, instr):\n return self.unaryop(operator.neg, instr)\n\n def eval_UNARY_INVERT(self, instr):\n return self.unaryop(operator.invert, instr)\n\n def get_next_instr(self, name):\n try:\n next_instr = self.block[self.index]\n except IndexError:\n return None\n if next_instr.name == name:\n return next_instr\n return None\n\n def eval_UNARY_NOT(self, instr):\n # Note: UNARY_NOT <const> is not optimized\n\n next_instr = self.get_next_instr("POP_JUMP_IF_FALSE")\n if next_instr is None:\n return None\n\n # Replace 
UNARY_NOT+POP_JUMP_IF_FALSE with POP_JUMP_IF_TRUE\n instr.set("POP_JUMP_IF_TRUE", next_instr.arg)\n del self.block[self.index]\n\n def binop(self, op, instr):\n try:\n left = self.const_stack[-2]\n right = self.const_stack[-1]\n except IndexError:\n return\n\n try:\n result = op(left, right)\n except Exception:\n return\n\n if not self.check_result(result):\n return\n\n self.replace_load_const(2, instr, result)\n\n def eval_BINARY_ADD(self, instr):\n return self.binop(operator.add, instr)\n\n def eval_BINARY_SUBTRACT(self, instr):\n return self.binop(operator.sub, instr)\n\n def eval_BINARY_MULTIPLY(self, instr):\n return self.binop(operator.mul, instr)\n\n def eval_BINARY_TRUE_DIVIDE(self, instr):\n return self.binop(operator.truediv, instr)\n\n def eval_BINARY_FLOOR_DIVIDE(self, instr):\n return self.binop(operator.floordiv, instr)\n\n def eval_BINARY_MODULO(self, instr):\n return self.binop(operator.mod, instr)\n\n def eval_BINARY_POWER(self, instr):\n return self.binop(operator.pow, instr)\n\n def eval_BINARY_LSHIFT(self, instr):\n return self.binop(operator.lshift, instr)\n\n def eval_BINARY_RSHIFT(self, instr):\n return self.binop(operator.rshift, instr)\n\n def eval_BINARY_AND(self, instr):\n return self.binop(operator.and_, instr)\n\n def eval_BINARY_OR(self, instr):\n return self.binop(operator.or_, instr)\n\n def eval_BINARY_XOR(self, instr):\n return self.binop(operator.xor, instr)\n\n def eval_BINARY_SUBSCR(self, instr):\n return self.binop(operator.getitem, instr)\n\n def replace_container_of_consts(self, instr, container_type):\n items = self.const_stack[-instr.arg :]\n value = container_type(items)\n self.replace_load_const(instr.arg, instr, value)\n\n def build_tuple_unpack_seq(self, instr):\n next_instr = self.get_next_instr("UNPACK_SEQUENCE")\n if next_instr is None or next_instr.arg != instr.arg:\n return\n\n if instr.arg < 1:\n return\n\n if self.const_stack and instr.arg <= len(self.const_stack):\n nconst = instr.arg\n start = self.index - 1\n\n # Rewrite LOAD_CONST instructions in the reverse order\n load_consts = self.block[start - nconst : start]\n self.block[start - nconst : start] = reversed(load_consts)\n\n # Remove BUILD_TUPLE+UNPACK_SEQUENCE\n self.block[start : start + 2] = ()\n self.index -= 2\n self.const_stack.clear()\n return\n\n if instr.arg == 1:\n # Replace BUILD_TUPLE 1 + UNPACK_SEQUENCE 1 with NOP\n del self.block[self.index - 1 : self.index + 1]\n elif instr.arg == 2:\n # Replace BUILD_TUPLE 2 + UNPACK_SEQUENCE 2 with ROT_TWO\n rot2 = Instr("ROT_TWO", lineno=instr.lineno)\n self.block[self.index - 1 : self.index + 1] = (rot2,)\n self.index -= 1\n self.const_stack.clear()\n elif instr.arg == 3:\n # Replace BUILD_TUPLE 3 + UNPACK_SEQUENCE 3\n # with ROT_THREE + ROT_TWO\n rot3 = Instr("ROT_THREE", lineno=instr.lineno)\n rot2 = Instr("ROT_TWO", lineno=instr.lineno)\n self.block[self.index - 1 : self.index + 1] = (rot3, rot2)\n self.index -= 1\n self.const_stack.clear()\n\n def build_tuple(self, instr, container_type):\n if instr.arg > len(self.const_stack):\n return\n\n next_instr = self.get_next_instr("COMPARE_OP")\n if next_instr is None or next_instr.arg not in (Compare.IN, Compare.NOT_IN):\n return\n\n self.replace_container_of_consts(instr, container_type)\n return True\n\n def eval_BUILD_TUPLE(self, instr):\n if not instr.arg:\n return\n\n if instr.arg <= len(self.const_stack):\n self.replace_container_of_consts(instr, tuple)\n else:\n self.build_tuple_unpack_seq(instr)\n\n def eval_BUILD_LIST(self, instr):\n if not instr.arg:\n return\n\n if not 
self.build_tuple(instr, tuple):\n self.build_tuple_unpack_seq(instr)\n\n def eval_BUILD_SET(self, instr):\n if not instr.arg:\n return\n\n self.build_tuple(instr, frozenset)\n\n # Note: BUILD_SLICE is not optimized\n\n def eval_COMPARE_OP(self, instr):\n # Note: COMPARE_OP: 2 < 3 is not optimized\n\n try:\n new_arg = NOT_COMPARE[instr.arg]\n except KeyError:\n return\n\n if self.get_next_instr("UNARY_NOT") is None:\n return\n\n # not (a is b) --> a is not b\n # not (a in b) --> a not in b\n # not (a is not b) --> a is b\n # not (a not in b) --> a in b\n instr.arg = new_arg\n self.block[self.index - 1 : self.index + 1] = (instr,)\n\n def jump_if_or_pop(self, instr):\n # Simplify conditional jump to conditional jump where the\n # result of the first test implies the success of a similar\n # test or the failure of the opposite test.\n #\n # Arises in code like:\n # "if a and b:"\n # "if a or b:"\n # "a and b or c"\n # "(a and b) and c"\n #\n # x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_FALSE_OR_POP z\n # --> x:JUMP_IF_FALSE_OR_POP z\n #\n # x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_TRUE_OR_POP z\n # --> x:POP_JUMP_IF_FALSE y+3\n # where y+3 is the instruction following the second test.\n target_block = instr.arg\n try:\n target_instr = target_block[0]\n except IndexError:\n return\n\n if not target_instr.is_cond_jump():\n self.optimize_jump_to_cond_jump(instr)\n return\n\n if (target_instr.name in JUMPS_ON_TRUE) == (instr.name in JUMPS_ON_TRUE):\n # The second jump will be taken iff the first is.\n\n target2 = target_instr.arg\n # The current opcode inherits its target's stack behaviour\n instr.name = target_instr.name\n instr.arg = target2\n self.block[self.index - 1] = instr\n self.index -= 1\n else:\n # The second jump is not taken if the first is (so jump past it),\n # and all conditional jumps pop their argument when they're not\n # taken (so change the first jump to pop its argument when it's\n # taken).\n if instr.name in JUMPS_ON_TRUE:\n name = "POP_JUMP_IF_TRUE"\n else:\n name = "POP_JUMP_IF_FALSE"\n\n new_label = self.code.split_block(target_block, 1)\n\n instr.name = name\n instr.arg = new_label\n self.block[self.index - 1] = instr\n self.index -= 1\n\n def eval_JUMP_IF_FALSE_OR_POP(self, instr):\n self.jump_if_or_pop(instr)\n\n def eval_JUMP_IF_TRUE_OR_POP(self, instr):\n self.jump_if_or_pop(instr)\n\n def eval_NOP(self, instr):\n # Remove NOP\n del self.block[self.index - 1]\n self.index -= 1\n\n def optimize_jump_to_cond_jump(self, instr):\n # Replace jumps to unconditional jumps\n jump_label = instr.arg\n assert isinstance(jump_label, BasicBlock), jump_label\n\n try:\n target_instr = jump_label[0]\n except IndexError:\n return\n\n if instr.is_uncond_jump() and target_instr.name == "RETURN_VALUE":\n # Replace JUMP_ABSOLUTE => RETURN_VALUE with RETURN_VALUE\n self.block[self.index - 1] = target_instr\n\n elif target_instr.is_uncond_jump():\n # Replace JUMP_FORWARD t1 jumping to JUMP_FORWARD t2\n # with JUMP_ABSOLUTE t2\n jump_target2 = target_instr.arg\n\n name = instr.name\n if instr.name == "JUMP_FORWARD":\n name = "JUMP_ABSOLUTE"\n else:\n # FIXME: reimplement this check\n # if jump_target2 < 0:\n # # No backward relative jumps\n # return\n\n # FIXME: remove this workaround and implement comment code ^^\n if instr.opcode in opcode.hasjrel:\n return\n\n instr.name = name\n instr.arg = jump_target2\n self.block[self.index - 1] = instr\n\n def optimize_jump(self, instr):\n if instr.is_uncond_jump() and self.index == len(self.block):\n # JUMP_ABSOLUTE at the end of a block which points to the\n 
# following block: remove the jump, link the current block\n # to the following block\n block_index = self.block_index\n target_block = instr.arg\n target_block_index = self.code.get_block_index(target_block)\n if target_block_index == block_index:\n del self.block[self.index - 1]\n self.block.next_block = target_block\n return\n\n self.optimize_jump_to_cond_jump(instr)\n\n def iterblock(self, block):\n self.block = block\n self.index = 0\n while self.index < len(block):\n instr = self.block[self.index]\n self.index += 1\n yield instr\n\n def optimize_block(self, block):\n self.const_stack.clear()\n self.in_consts = False\n\n for instr in self.iterblock(block):\n if not self.in_consts:\n self.const_stack.clear()\n self.in_consts = False\n\n meth_name = "eval_%s" % instr.name\n meth = getattr(self, meth_name, None)\n if meth is not None:\n meth(instr)\n elif instr.has_jump():\n self.optimize_jump(instr)\n\n # Note: Skipping over LOAD_CONST trueconst; POP_JUMP_IF_FALSE\n # <target> is not implemented, since it looks like the optimization\n # is never trigerred in practice. The compiler already optimizes if\n # and while statements.\n\n def remove_dead_blocks(self):\n # FIXME: remove empty blocks?\n\n used_blocks = {id(self.code[0])}\n for block in self.code:\n if block.next_block is not None:\n used_blocks.add(id(block.next_block))\n for instr in block:\n if isinstance(instr, Instr) and isinstance(instr.arg, BasicBlock):\n used_blocks.add(id(instr.arg))\n\n block_index = 0\n while block_index < len(self.code):\n block = self.code[block_index]\n if id(block) not in used_blocks:\n del self.code[block_index]\n else:\n block_index += 1\n\n # FIXME: merge following blocks if block1 does not contain any\n # jump and block1.next_block is block2\n\n def optimize_cfg(self, cfg):\n self.code = cfg\n self.const_stack = []\n\n self.remove_dead_blocks()\n\n self.block_index = 0\n while self.block_index < len(self.code):\n block = self.code[self.block_index]\n self.block_index += 1\n self.optimize_block(block)\n\n def optimize(self, code_obj):\n bytecode = Bytecode.from_code(code_obj)\n cfg = ControlFlowGraph.from_bytecode(bytecode)\n\n self.optimize_cfg(cfg)\n\n bytecode = cfg.to_bytecode()\n code = bytecode.to_code()\n return code\n\n\n# Code transformer for the PEP 511\nclass CodeTransformer:\n name = "pyopt"\n\n def code_transformer(self, code, context):\n if sys.flags.verbose:\n print("Optimize %s:%s: %s" % (code.co_filename, code.co_firstlineno, code.co_name))\n optimizer = PeepholeOptimizer()\n return optimizer.optimize(code)\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\peephole_opt.py
peephole_opt.py
Python
16,182
0.95
0.206967
0.161039
react-lib
264
2025-06-29T17:12:50.025759
BSD-3-Clause
false
ac3f1562ceabbd433c5be0882d668df0
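A minimal driver for the peephole pass, mirroring the constant-folding case exercised by test_optimize_code_obj in the test module further below (vendored import path assumed). The two LOAD_CONST instructions and the BINARY_ADD collapse into a single LOAD_CONST 8.

from _pydevd_frame_eval.vendored.bytecode import Bytecode, Instr
from _pydevd_frame_eval.vendored.bytecode import peephole_opt

# "x = 3 + 5" before optimization.
noopt = Bytecode([
    Instr("LOAD_CONST", 3),
    Instr("LOAD_CONST", 5),
    Instr("BINARY_ADD"),
    Instr("STORE_NAME", "x"),
    Instr("LOAD_CONST", None),
    Instr("RETURN_VALUE"),
]).to_code()

optim = peephole_opt.PeepholeOptimizer().optimize(noopt)
ns = {}
exec(optim, ns)
assert ns["x"] == 8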
__version__ = "0.13.0.dev"\n\n__all__ = [\n "Label",\n "Instr",\n "SetLineno",\n "Bytecode",\n "ConcreteInstr",\n "ConcreteBytecode",\n "ControlFlowGraph",\n "CompilerFlags",\n "Compare",\n]\n\nfrom _pydevd_frame_eval.vendored.bytecode.flags import CompilerFlags\nfrom _pydevd_frame_eval.vendored.bytecode.instr import (\n UNSET,\n Label,\n SetLineno,\n Instr,\n CellVar,\n FreeVar, # noqa\n Compare,\n)\nfrom _pydevd_frame_eval.vendored.bytecode.bytecode import (\n BaseBytecode,\n _BaseBytecodeList,\n _InstrList,\n Bytecode,\n) # noqa\nfrom _pydevd_frame_eval.vendored.bytecode.concrete import (\n ConcreteInstr,\n ConcreteBytecode, # noqa\n # import needed to use it in bytecode.py\n _ConvertBytecodeToConcrete,\n)\nfrom _pydevd_frame_eval.vendored.bytecode.cfg import BasicBlock, ControlFlowGraph # noqa\nimport sys\n\n\ndef dump_bytecode(bytecode, *, lineno=False, stream=sys.stdout):\n def format_line(index, line):\n nonlocal cur_lineno, prev_lineno\n if lineno:\n if cur_lineno != prev_lineno:\n line = "L.% 3s % 3s: %s" % (cur_lineno, index, line)\n prev_lineno = cur_lineno\n else:\n line = " % 3s: %s" % (index, line)\n else:\n line = line\n return line\n\n def format_instr(instr, labels=None):\n text = instr.name\n arg = instr._arg\n if arg is not UNSET:\n if isinstance(arg, Label):\n try:\n arg = "<%s>" % labels[arg]\n except KeyError:\n arg = "<error: unknown label>"\n elif isinstance(arg, BasicBlock):\n try:\n arg = "<%s>" % labels[id(arg)]\n except KeyError:\n arg = "<error: unknown block>"\n else:\n arg = repr(arg)\n text = "%s %s" % (text, arg)\n return text\n\n indent = " " * 4\n\n cur_lineno = bytecode.first_lineno\n prev_lineno = None\n\n if isinstance(bytecode, ConcreteBytecode):\n offset = 0\n for instr in bytecode:\n fields = []\n if instr.lineno is not None:\n cur_lineno = instr.lineno\n if lineno:\n fields.append(format_instr(instr))\n line = "".join(fields)\n line = format_line(offset, line)\n else:\n fields.append("% 3s %s" % (offset, format_instr(instr)))\n line = "".join(fields)\n print(line, file=stream)\n\n offset += instr.size\n elif isinstance(bytecode, Bytecode):\n labels = {}\n for index, instr in enumerate(bytecode):\n if isinstance(instr, Label):\n labels[instr] = "label_instr%s" % index\n\n for index, instr in enumerate(bytecode):\n if isinstance(instr, Label):\n label = labels[instr]\n line = "%s:" % label\n if index != 0:\n print(file=stream)\n else:\n if instr.lineno is not None:\n cur_lineno = instr.lineno\n line = format_instr(instr, labels)\n line = indent + format_line(index, line)\n print(line, file=stream)\n print(file=stream)\n elif isinstance(bytecode, ControlFlowGraph):\n labels = {}\n for block_index, block in enumerate(bytecode, 1):\n labels[id(block)] = "block%s" % block_index\n\n for block_index, block in enumerate(bytecode, 1):\n print("%s:" % labels[id(block)], file=stream)\n prev_lineno = None\n for index, instr in enumerate(block):\n if instr.lineno is not None:\n cur_lineno = instr.lineno\n line = format_instr(instr, labels)\n line = indent + format_line(index, line)\n print(line, file=stream)\n if block.next_block is not None:\n print(indent + "-> %s" % labels[id(block.next_block)], file=stream)\n print(file=stream)\n else:\n raise TypeError("unknown bytecode class")\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__init__.py
__init__.py
Python
4,284
0.95
0.19084
0.008333
react-lib
531
2025-02-04T12:23:50.789127
GPL-3.0
false
777f079876f70388a2f78d4888adf6e8
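A small sketch of the dump_bytecode() helper defined in the package __init__ above (vendored import path assumed; the function f is illustrative). Passing lineno=True prefixes each instruction with its source line, and stream lets the listing be captured instead of written to stdout.

import io
from _pydevd_frame_eval.vendored.bytecode import Bytecode, dump_bytecode

def f(a, b):
    return a + b

buf = io.StringIO()
dump_bytecode(Bytecode.from_code(f.__code__), lineno=True, stream=buf)
print(buf.getvalue())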
import pytest\nfrom tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON\nfrom tests_python.debug_constants import TEST_CYTHON\n\npytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason="Requires CPython >= 3.6")\nimport unittest\n\nfrom _pydevd_frame_eval.vendored.bytecode import ConcreteBytecode, Bytecode, ControlFlowGraph\nfrom _pydevd_frame_eval.vendored.bytecode.tests import get_code\n\n\nclass CodeTests(unittest.TestCase):\n """Check that bytecode.from_code(code).to_code() returns code."""\n\n def check(self, source, function=False):\n ref_code = get_code(source, function=function)\n\n code = ConcreteBytecode.from_code(ref_code).to_code()\n self.assertEqual(code, ref_code)\n\n code = Bytecode.from_code(ref_code).to_code()\n self.assertEqual(code, ref_code)\n\n bytecode = Bytecode.from_code(ref_code)\n blocks = ControlFlowGraph.from_bytecode(bytecode)\n code = blocks.to_bytecode().to_code()\n self.assertEqual(code, ref_code)\n\n def test_loop(self):\n self.check(\n """\n for x in range(1, 10):\n x += 1\n if x == 3:\n continue\n x -= 1\n if x > 7:\n break\n x = 0\n print(x)\n """\n )\n\n def test_varargs(self):\n self.check(\n """\n def func(a, b, *varargs):\n pass\n """,\n function=True,\n )\n\n def test_kwargs(self):\n self.check(\n """\n def func(a, b, **kwargs):\n pass\n """,\n function=True,\n )\n\n def test_kwonlyargs(self):\n self.check(\n """\n def func(*, arg, arg2):\n pass\n """,\n function=True,\n )\n\n # Added because Python 3.10 added some special beahavior with respect to\n # generators in term of stack size\n def test_generator_func(self):\n self.check(\n """\n def func(arg, arg2):\n yield\n """,\n function=True,\n )\n\n def test_async_func(self):\n self.check(\n """\n async def func(arg, arg2):\n pass\n """,\n function=True,\n )\n\n\nif __name__ == "__main__":\n unittest.main() # pragma: no cover\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\test_code.py
test_code.py
Python
2,518
0.95
0.268817
0.025974
vue-tools
971
2024-09-30T06:39:46.465827
GPL-3.0
true
0ce851541d2df50ff810a6d5fa74e319
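The same round-trip invariant that CodeTests.check() asserts, pulled out of the unittest harness as a standalone sketch (vendored import path assumed; exact code-object equality is what the test relies on for the CPython versions this vendored copy targets).

from _pydevd_frame_eval.vendored.bytecode import Bytecode, ConcreteBytecode, ControlFlowGraph

ref = compile("x = 0\nfor i in range(3):\n    x += i\n", "<string>", "exec")
assert ConcreteBytecode.from_code(ref).to_code() == ref
assert Bytecode.from_code(ref).to_code() == ref
assert ControlFlowGraph.from_bytecode(Bytecode.from_code(ref)).to_bytecode().to_code() == ref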
import pytest\nfrom tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON\nfrom tests_python.debug_constants import TEST_CYTHON\n\npytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason="Requires CPython >= 3.6")\nimport sys\nimport unittest\nfrom _pydevd_frame_eval.vendored.bytecode import Label, Instr, Compare, Bytecode, ControlFlowGraph\nfrom _pydevd_frame_eval.vendored.bytecode import peephole_opt\nfrom _pydevd_frame_eval.vendored.bytecode.tests import TestCase, dump_bytecode\nfrom unittest import mock\n\n\nclass Tests(TestCase):\n maxDiff = 80 * 100\n\n def optimize_blocks(self, code):\n if isinstance(code, Bytecode):\n code = ControlFlowGraph.from_bytecode(code)\n optimizer = peephole_opt.PeepholeOptimizer()\n optimizer.optimize_cfg(code)\n return code\n\n def check(self, code, *expected):\n if isinstance(code, Bytecode):\n code = ControlFlowGraph.from_bytecode(code)\n optimizer = peephole_opt.PeepholeOptimizer()\n optimizer.optimize_cfg(code)\n code = code.to_bytecode()\n\n try:\n self.assertEqual(code, expected)\n except AssertionError:\n print("Optimized code:")\n dump_bytecode(code)\n\n print("Expected code:")\n for instr in expected:\n print(instr)\n\n raise\n\n def check_dont_optimize(self, code):\n code = ControlFlowGraph.from_bytecode(code)\n noopt = code.to_bytecode()\n\n optim = self.optimize_blocks(code)\n optim = optim.to_bytecode()\n self.assertEqual(optim, noopt)\n\n def test_unary_op(self):\n def check_unary_op(op, value, result):\n code = Bytecode([Instr("LOAD_CONST", value), Instr(op), Instr("STORE_NAME", "x")])\n self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))\n\n check_unary_op("UNARY_POSITIVE", 2, 2)\n check_unary_op("UNARY_NEGATIVE", 3, -3)\n check_unary_op("UNARY_INVERT", 5, -6)\n\n def test_binary_op(self):\n def check_bin_op(left, op, right, result):\n code = Bytecode(\n [\n Instr("LOAD_CONST", left),\n Instr("LOAD_CONST", right),\n Instr(op),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))\n\n check_bin_op(10, "BINARY_ADD", 20, 30)\n check_bin_op(5, "BINARY_SUBTRACT", 1, 4)\n check_bin_op(5, "BINARY_MULTIPLY", 3, 15)\n check_bin_op(10, "BINARY_TRUE_DIVIDE", 3, 10 / 3)\n check_bin_op(10, "BINARY_FLOOR_DIVIDE", 3, 3)\n check_bin_op(10, "BINARY_MODULO", 3, 1)\n check_bin_op(2, "BINARY_POWER", 8, 256)\n check_bin_op(1, "BINARY_LSHIFT", 3, 8)\n check_bin_op(16, "BINARY_RSHIFT", 3, 2)\n check_bin_op(10, "BINARY_AND", 3, 2)\n check_bin_op(2, "BINARY_OR", 3, 3)\n check_bin_op(2, "BINARY_XOR", 3, 1)\n\n def test_combined_unary_bin_ops(self):\n # x = 1 + 3 + 7\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 3),\n Instr("BINARY_ADD"),\n Instr("LOAD_CONST", 7),\n Instr("BINARY_ADD"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_CONST", 11), Instr("STORE_NAME", "x"))\n\n # x = ~(~(5))\n code = Bytecode(\n [\n Instr("LOAD_CONST", 5),\n Instr("UNARY_INVERT"),\n Instr("UNARY_INVERT"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_CONST", 5), Instr("STORE_NAME", "x"))\n\n # "events = [(0, 'call'), (1, 'line'), (-(3), 'call')]"\n code = Bytecode(\n [\n Instr("LOAD_CONST", 0),\n Instr("LOAD_CONST", "call"),\n Instr("BUILD_TUPLE", 2),\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", "line"),\n Instr("BUILD_TUPLE", 2),\n Instr("LOAD_CONST", 3),\n Instr("UNARY_NEGATIVE"),\n Instr("LOAD_CONST", "call"),\n Instr("BUILD_TUPLE", 2),\n Instr("BUILD_LIST", 3),\n 
Instr("STORE_NAME", "events"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_CONST", (0, "call")),\n Instr("LOAD_CONST", (1, "line")),\n Instr("LOAD_CONST", (-3, "call")),\n Instr("BUILD_LIST", 3),\n Instr("STORE_NAME", "events"),\n )\n\n # 'x = (1,) + (0,) * 8'\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("BUILD_TUPLE", 1),\n Instr("LOAD_CONST", 0),\n Instr("BUILD_TUPLE", 1),\n Instr("LOAD_CONST", 8),\n Instr("BINARY_MULTIPLY"),\n Instr("BINARY_ADD"),\n Instr("STORE_NAME", "x"),\n ]\n )\n zeros = (0,) * 8\n result = (1,) + zeros\n self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))\n\n def test_max_size(self):\n max_size = 3\n with mock.patch.object(peephole_opt, "MAX_SIZE", max_size):\n # optimized binary operation: size <= maximum size\n #\n # (9,) * size\n size = max_size\n result = (9,) * size\n code = Bytecode(\n [\n Instr("LOAD_CONST", 9),\n Instr("BUILD_TUPLE", 1),\n Instr("LOAD_CONST", size),\n Instr("BINARY_MULTIPLY"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))\n\n # don't optimize binary operation: size > maximum size\n #\n # x = (9,) * size\n size = max_size + 1\n code = Bytecode(\n [\n Instr("LOAD_CONST", 9),\n Instr("BUILD_TUPLE", 1),\n Instr("LOAD_CONST", size),\n Instr("BINARY_MULTIPLY"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_CONST", (9,)),\n Instr("LOAD_CONST", size),\n Instr("BINARY_MULTIPLY"),\n Instr("STORE_NAME", "x"),\n )\n\n def test_bin_op_dont_optimize(self):\n # 1 / 0\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 0),\n Instr("BINARY_TRUE_DIVIDE"),\n Instr("POP_TOP"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n # 1 // 0\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 0),\n Instr("BINARY_FLOOR_DIVIDE"),\n Instr("POP_TOP"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n # 1 % 0\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 0),\n Instr("BINARY_MODULO"),\n Instr("POP_TOP"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n # 1 % 1j\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 1j),\n Instr("BINARY_MODULO"),\n Instr("POP_TOP"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n def test_build_tuple(self):\n # x = (1, 2, 3)\n code = Bytecode(\n [\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 2),\n Instr("LOAD_CONST", 3),\n Instr("BUILD_TUPLE", 3),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_CONST", (1, 2, 3)), Instr("STORE_NAME", "x"))\n\n def test_build_list(self):\n # test = x in [1, 2, 3]\n code = Bytecode(\n [\n Instr("LOAD_NAME", "x"),\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 2),\n Instr("LOAD_CONST", 3),\n Instr("BUILD_LIST", 3),\n Instr("COMPARE_OP", Compare.IN),\n Instr("STORE_NAME", "test"),\n ]\n )\n\n self.check(\n code,\n Instr("LOAD_NAME", "x"),\n Instr("LOAD_CONST", (1, 2, 3)),\n Instr("COMPARE_OP", Compare.IN),\n Instr("STORE_NAME", "test"),\n )\n\n def test_build_list_unpack_seq(self):\n for build_list in ("BUILD_TUPLE", "BUILD_LIST"):\n # x, = [a]\n code = Bytecode(\n [\n Instr("LOAD_NAME", "a"),\n Instr(build_list, 1),\n Instr("UNPACK_SEQUENCE", 1),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(code, Instr("LOAD_NAME", "a"), Instr("STORE_NAME", "x"))\n\n # x, y = [a, b]\n code = Bytecode(\n [\n 
Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr(build_list, 2),\n Instr("UNPACK_SEQUENCE", 2),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr("ROT_TWO"),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n )\n\n # x, y, z = [a, b, c]\n code = Bytecode(\n [\n Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr("LOAD_NAME", "c"),\n Instr(build_list, 3),\n Instr("UNPACK_SEQUENCE", 3),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n Instr("STORE_NAME", "z"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr("LOAD_NAME", "c"),\n Instr("ROT_THREE"),\n Instr("ROT_TWO"),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n Instr("STORE_NAME", "z"),\n )\n\n def test_build_tuple_unpack_seq_const(self):\n # x, y = (3, 4)\n code = Bytecode(\n [\n Instr("LOAD_CONST", 3),\n Instr("LOAD_CONST", 4),\n Instr("BUILD_TUPLE", 2),\n Instr("UNPACK_SEQUENCE", 2),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_CONST", (3, 4)),\n Instr("UNPACK_SEQUENCE", 2),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n )\n\n def test_build_list_unpack_seq_const(self):\n # x, y, z = [3, 4, 5]\n code = Bytecode(\n [\n Instr("LOAD_CONST", 3),\n Instr("LOAD_CONST", 4),\n Instr("LOAD_CONST", 5),\n Instr("BUILD_LIST", 3),\n Instr("UNPACK_SEQUENCE", 3),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n Instr("STORE_NAME", "z"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_CONST", 5),\n Instr("LOAD_CONST", 4),\n Instr("LOAD_CONST", 3),\n Instr("STORE_NAME", "x"),\n Instr("STORE_NAME", "y"),\n Instr("STORE_NAME", "z"),\n )\n\n def test_build_set(self):\n # test = x in {1, 2, 3}\n code = Bytecode(\n [\n Instr("LOAD_NAME", "x"),\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", 2),\n Instr("LOAD_CONST", 3),\n Instr("BUILD_SET", 3),\n Instr("COMPARE_OP", Compare.IN),\n Instr("STORE_NAME", "test"),\n ]\n )\n\n self.check(\n code,\n Instr("LOAD_NAME", "x"),\n Instr("LOAD_CONST", frozenset((1, 2, 3))),\n Instr("COMPARE_OP", Compare.IN),\n Instr("STORE_NAME", "test"),\n )\n\n def test_compare_op_unary_not(self):\n for op, not_op in (\n (Compare.IN, Compare.NOT_IN), # in => not in\n (Compare.NOT_IN, Compare.IN), # not in => in\n (Compare.IS, Compare.IS_NOT), # is => is not\n (Compare.IS_NOT, Compare.IS), # is not => is\n ):\n code = Bytecode(\n [\n Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr("COMPARE_OP", op),\n Instr("UNARY_NOT"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check(\n code,\n Instr("LOAD_NAME", "a"),\n Instr("LOAD_NAME", "b"),\n Instr("COMPARE_OP", not_op),\n Instr("STORE_NAME", "x"),\n )\n\n # don't optimize:\n # x = not (a and b is True)\n label_instr5 = Label()\n code = Bytecode(\n [\n Instr("LOAD_NAME", "a"),\n Instr("JUMP_IF_FALSE_OR_POP", label_instr5),\n Instr("LOAD_NAME", "b"),\n Instr("LOAD_CONST", True),\n Instr("COMPARE_OP", Compare.IS),\n label_instr5,\n Instr("UNARY_NOT"),\n Instr("STORE_NAME", "x"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n def test_dont_optimize(self):\n # x = 3 < 5\n code = Bytecode(\n [\n Instr("LOAD_CONST", 3),\n Instr("LOAD_CONST", 5),\n Instr("COMPARE_OP", Compare.LT),\n Instr("STORE_NAME", "x"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n self.check_dont_optimize(code)\n\n # x = (10, 20, 30)[1:]\n code = Bytecode(\n [\n Instr("LOAD_CONST", (10, 20, 
30)),\n Instr("LOAD_CONST", 1),\n Instr("LOAD_CONST", None),\n Instr("BUILD_SLICE", 2),\n Instr("BINARY_SUBSCR"),\n Instr("STORE_NAME", "x"),\n ]\n )\n self.check_dont_optimize(code)\n\n def test_optimize_code_obj(self):\n # Test optimize() method with a code object\n #\n # x = 3 + 5 => x = 8\n noopt = Bytecode(\n [\n Instr("LOAD_CONST", 3),\n Instr("LOAD_CONST", 5),\n Instr("BINARY_ADD"),\n Instr("STORE_NAME", "x"),\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n noopt = noopt.to_code()\n\n optimizer = peephole_opt.PeepholeOptimizer()\n optim = optimizer.optimize(noopt)\n\n code = Bytecode.from_code(optim)\n self.assertEqual(\n code,\n [\n Instr("LOAD_CONST", 8, lineno=1),\n Instr("STORE_NAME", "x", lineno=1),\n Instr("LOAD_CONST", None, lineno=1),\n Instr("RETURN_VALUE", lineno=1),\n ],\n )\n\n def test_return_value(self):\n # return+return: remove second return\n #\n # def func():\n # return 4\n # return 5\n code = Bytecode(\n [\n Instr("LOAD_CONST", 4, lineno=2),\n Instr("RETURN_VALUE", lineno=2),\n Instr("LOAD_CONST", 5, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n ]\n )\n code = ControlFlowGraph.from_bytecode(code)\n self.check(code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2))\n\n # return+return + return+return: remove second and fourth return\n #\n # def func():\n # return 4\n # return 5\n # return 6\n # return 7\n code = Bytecode(\n [\n Instr("LOAD_CONST", 4, lineno=2),\n Instr("RETURN_VALUE", lineno=2),\n Instr("LOAD_CONST", 5, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n Instr("LOAD_CONST", 6, lineno=4),\n Instr("RETURN_VALUE", lineno=4),\n Instr("LOAD_CONST", 7, lineno=5),\n Instr("RETURN_VALUE", lineno=5),\n ]\n )\n code = ControlFlowGraph.from_bytecode(code)\n self.check(code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2))\n\n # return + JUMP_ABSOLUTE: remove JUMP_ABSOLUTE\n # while 1:\n # return 7\n if sys.version_info < (3, 8):\n setup_loop = Label()\n return_label = Label()\n code = Bytecode(\n [\n setup_loop,\n Instr("SETUP_LOOP", return_label, lineno=2),\n Instr("LOAD_CONST", 7, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),\n Instr("POP_BLOCK", lineno=3),\n return_label,\n Instr("LOAD_CONST", None, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n ]\n )\n code = ControlFlowGraph.from_bytecode(code)\n\n end_loop = Label()\n self.check(\n code,\n Instr("SETUP_LOOP", end_loop, lineno=2),\n Instr("LOAD_CONST", 7, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n end_loop,\n Instr("LOAD_CONST", None, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n )\n else:\n setup_loop = Label()\n return_label = Label()\n code = Bytecode(\n [\n setup_loop,\n Instr("LOAD_CONST", 7, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),\n Instr("LOAD_CONST", None, lineno=3),\n Instr("RETURN_VALUE", lineno=3),\n ]\n )\n code = ControlFlowGraph.from_bytecode(code)\n\n self.check(code, Instr("LOAD_CONST", 7, lineno=3), Instr("RETURN_VALUE", lineno=3))\n\n def test_not_jump_if_false(self):\n # Replace UNARY_NOT+POP_JUMP_IF_FALSE with POP_JUMP_IF_TRUE\n #\n # if not x:\n # y = 9\n label = Label()\n code = Bytecode(\n [\n Instr("LOAD_NAME", "x"),\n Instr("UNARY_NOT"),\n Instr("POP_JUMP_IF_FALSE", label),\n Instr("LOAD_CONST", 9),\n Instr("STORE_NAME", "y"),\n label,\n ]\n )\n\n code = self.optimize_blocks(code)\n label = Label()\n self.check(\n code,\n Instr("LOAD_NAME", "x"),\n Instr("POP_JUMP_IF_TRUE", label),\n Instr("LOAD_CONST", 9),\n 
Instr("STORE_NAME", "y"),\n label,\n )\n\n def test_unconditional_jump_to_return(self):\n # def func():\n # if test:\n # if test2:\n # x = 10\n # else:\n # x = 20\n # else:\n # x = 30\n\n label_instr11 = Label()\n label_instr14 = Label()\n label_instr7 = Label()\n code = Bytecode(\n [\n Instr("LOAD_GLOBAL", "test", lineno=2),\n Instr("POP_JUMP_IF_FALSE", label_instr11, lineno=2),\n Instr("LOAD_GLOBAL", "test2", lineno=3),\n Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),\n Instr("LOAD_CONST", 10, lineno=4),\n Instr("STORE_FAST", "x", lineno=4),\n Instr("JUMP_ABSOLUTE", label_instr14, lineno=4),\n label_instr7,\n Instr("LOAD_CONST", 20, lineno=6),\n Instr("STORE_FAST", "x", lineno=6),\n Instr("JUMP_FORWARD", label_instr14, lineno=6),\n label_instr11,\n Instr("LOAD_CONST", 30, lineno=8),\n Instr("STORE_FAST", "x", lineno=8),\n label_instr14,\n Instr("LOAD_CONST", None, lineno=8),\n Instr("RETURN_VALUE", lineno=8),\n ]\n )\n\n label1 = Label()\n label3 = Label()\n label4 = Label()\n self.check(\n code,\n Instr("LOAD_GLOBAL", "test", lineno=2),\n Instr("POP_JUMP_IF_FALSE", label3, lineno=2),\n Instr("LOAD_GLOBAL", "test2", lineno=3),\n Instr("POP_JUMP_IF_FALSE", label1, lineno=3),\n Instr("LOAD_CONST", 10, lineno=4),\n Instr("STORE_FAST", "x", lineno=4),\n Instr("JUMP_ABSOLUTE", label4, lineno=4),\n label1,\n Instr("LOAD_CONST", 20, lineno=6),\n Instr("STORE_FAST", "x", lineno=6),\n Instr("JUMP_FORWARD", label4, lineno=6),\n label3,\n Instr("LOAD_CONST", 30, lineno=8),\n Instr("STORE_FAST", "x", lineno=8),\n label4,\n Instr("LOAD_CONST", None, lineno=8),\n Instr("RETURN_VALUE", lineno=8),\n )\n\n def test_unconditional_jumps(self):\n # def func():\n # if x:\n # if y:\n # func()\n label_instr7 = Label()\n code = Bytecode(\n [\n Instr("LOAD_GLOBAL", "x", lineno=2),\n Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=2),\n Instr("LOAD_GLOBAL", "y", lineno=3),\n Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),\n Instr("LOAD_GLOBAL", "func", lineno=4),\n Instr("CALL_FUNCTION", 0, lineno=4),\n Instr("POP_TOP", lineno=4),\n label_instr7,\n Instr("LOAD_CONST", None, lineno=4),\n Instr("RETURN_VALUE", lineno=4),\n ]\n )\n\n label_return = Label()\n self.check(\n code,\n Instr("LOAD_GLOBAL", "x", lineno=2),\n Instr("POP_JUMP_IF_FALSE", label_return, lineno=2),\n Instr("LOAD_GLOBAL", "y", lineno=3),\n Instr("POP_JUMP_IF_FALSE", label_return, lineno=3),\n Instr("LOAD_GLOBAL", "func", lineno=4),\n Instr("CALL_FUNCTION", 0, lineno=4),\n Instr("POP_TOP", lineno=4),\n label_return,\n Instr("LOAD_CONST", None, lineno=4),\n Instr("RETURN_VALUE", lineno=4),\n )\n\n def test_jump_to_return(self):\n # def func(condition):\n # return 'yes' if condition else 'no'\n label_instr4 = Label()\n label_instr6 = Label()\n code = Bytecode(\n [\n Instr("LOAD_FAST", "condition"),\n Instr("POP_JUMP_IF_FALSE", label_instr4),\n Instr("LOAD_CONST", "yes"),\n Instr("JUMP_FORWARD", label_instr6),\n label_instr4,\n Instr("LOAD_CONST", "no"),\n label_instr6,\n Instr("RETURN_VALUE"),\n ]\n )\n\n label = Label()\n self.check(\n code,\n Instr("LOAD_FAST", "condition"),\n Instr("POP_JUMP_IF_FALSE", label),\n Instr("LOAD_CONST", "yes"),\n Instr("RETURN_VALUE"),\n label,\n Instr("LOAD_CONST", "no"),\n Instr("RETURN_VALUE"),\n )\n\n def test_jump_if_true_to_jump_if_false(self):\n # Replace JUMP_IF_TRUE_OR_POP jumping to POP_JUMP_IF_FALSE <target>\n # with POP_JUMP_IF_TRUE <offset after the second POP_JUMP_IF_FALSE>\n #\n # if x or y:\n # z = 1\n\n label_instr3 = Label()\n label_instr7 = Label()\n code = Bytecode(\n [\n 
Instr("LOAD_NAME", "x"),\n Instr("JUMP_IF_TRUE_OR_POP", label_instr3),\n Instr("LOAD_NAME", "y"),\n label_instr3,\n Instr("POP_JUMP_IF_FALSE", label_instr7),\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "z"),\n label_instr7,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n\n label_instr4 = Label()\n label_instr7 = Label()\n self.check(\n code,\n Instr("LOAD_NAME", "x"),\n Instr("POP_JUMP_IF_TRUE", label_instr4),\n Instr("LOAD_NAME", "y"),\n Instr("POP_JUMP_IF_FALSE", label_instr7),\n label_instr4,\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "z"),\n label_instr7,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n )\n\n def test_jump_if_false_to_jump_if_false(self):\n # Replace JUMP_IF_FALSE_OR_POP jumping to POP_JUMP_IF_FALSE <label>\n # with POP_JUMP_IF_FALSE <label>\n #\n # while n > 0 and start > 3:\n # func()\n if sys.version_info < (3, 8):\n label_instr1 = Label()\n label_instr15 = Label()\n label_instr17 = Label()\n label_instr9 = Label()\n code = Bytecode(\n [\n Instr("SETUP_LOOP", label_instr17),\n label_instr1,\n Instr("LOAD_NAME", "n"),\n Instr("LOAD_CONST", 0),\n Instr("COMPARE_OP", Compare.GT),\n # JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE\n # which jumps to label_instr15\n Instr("JUMP_IF_FALSE_OR_POP", label_instr9),\n Instr("LOAD_NAME", "start"),\n Instr("LOAD_CONST", 3),\n Instr("COMPARE_OP", Compare.GT),\n label_instr9,\n Instr("POP_JUMP_IF_FALSE", label_instr15),\n Instr("LOAD_NAME", "func"),\n Instr("CALL_FUNCTION", 0),\n Instr("POP_TOP"),\n Instr("JUMP_ABSOLUTE", label_instr1),\n label_instr15,\n Instr("POP_BLOCK"),\n label_instr17,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n\n label_instr1 = Label()\n label_instr14 = Label()\n label_instr16 = Label()\n self.check(\n code,\n Instr("SETUP_LOOP", label_instr16),\n label_instr1,\n Instr("LOAD_NAME", "n"),\n Instr("LOAD_CONST", 0),\n Instr("COMPARE_OP", Compare.GT),\n Instr("POP_JUMP_IF_FALSE", label_instr14),\n Instr("LOAD_NAME", "start"),\n Instr("LOAD_CONST", 3),\n Instr("COMPARE_OP", Compare.GT),\n Instr("POP_JUMP_IF_FALSE", label_instr14),\n Instr("LOAD_NAME", "func"),\n Instr("CALL_FUNCTION", 0),\n Instr("POP_TOP"),\n Instr("JUMP_ABSOLUTE", label_instr1),\n label_instr14,\n Instr("POP_BLOCK"),\n label_instr16,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n )\n else:\n label_instr1 = Label()\n label_instr15 = Label()\n label_instr9 = Label()\n code = Bytecode(\n [\n label_instr1,\n Instr("LOAD_NAME", "n"),\n Instr("LOAD_CONST", 0),\n Instr("COMPARE_OP", Compare.GT),\n # JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE\n # which jumps to label_instr15\n Instr("JUMP_IF_FALSE_OR_POP", label_instr9),\n Instr("LOAD_NAME", "start"),\n Instr("LOAD_CONST", 3),\n Instr("COMPARE_OP", Compare.GT),\n label_instr9,\n Instr("POP_JUMP_IF_FALSE", label_instr15),\n Instr("LOAD_NAME", "func"),\n Instr("CALL_FUNCTION", 0),\n Instr("POP_TOP"),\n Instr("JUMP_ABSOLUTE", label_instr1),\n label_instr15,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n\n label_instr1 = Label()\n label_instr14 = Label()\n self.check(\n code,\n label_instr1,\n Instr("LOAD_NAME", "n"),\n Instr("LOAD_CONST", 0),\n Instr("COMPARE_OP", Compare.GT),\n Instr("POP_JUMP_IF_FALSE", label_instr14),\n Instr("LOAD_NAME", "start"),\n Instr("LOAD_CONST", 3),\n Instr("COMPARE_OP", Compare.GT),\n Instr("POP_JUMP_IF_FALSE", label_instr14),\n Instr("LOAD_NAME", "func"),\n Instr("CALL_FUNCTION", 0),\n Instr("POP_TOP"),\n Instr("JUMP_ABSOLUTE", label_instr1),\n label_instr14,\n Instr("LOAD_CONST", None),\n 
Instr("RETURN_VALUE"),\n )\n\n def test_nop(self):\n code = Bytecode([Instr("LOAD_NAME", "x"), Instr("NOP"), Instr("STORE_NAME", "test")])\n\n self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))\n\n def test_dead_code_jump(self):\n label = Label()\n code = Bytecode(\n [\n Instr("LOAD_NAME", "x"),\n Instr("JUMP_ABSOLUTE", label),\n # dead code\n Instr("LOAD_NAME", "y"),\n Instr("STORE_NAME", "test"),\n label,\n Instr("STORE_NAME", "test"),\n ]\n )\n\n self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))\n\n def test_uncond_jump_to_uncond_jump(self):\n # Replace JUMP_FORWARD t1 jumping to JUMP_FORWARD t2\n # with JUMP_ABSOLUTE t2\n\n label = Label()\n label2 = Label()\n label3 = Label()\n label4 = Label()\n code = Bytecode(\n [\n Instr("LOAD_NAME", "test"),\n Instr("POP_JUMP_IF_TRUE", label),\n # redundant jump\n Instr("JUMP_FORWARD", label2),\n label,\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "x"),\n Instr("LOAD_NAME", "test"),\n Instr("POP_JUMP_IF_TRUE", label3),\n label2,\n Instr("JUMP_FORWARD", label4),\n label3,\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "x"),\n label4,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n ]\n )\n\n label = Label()\n label3 = Label()\n label4 = Label()\n self.check(\n code,\n Instr("LOAD_NAME", "test"),\n Instr("POP_JUMP_IF_TRUE", label),\n # JUMP_FORWARD label2 was replaced with JUMP_ABSOLUTE label4\n Instr("JUMP_ABSOLUTE", label4),\n label,\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "x"),\n Instr("LOAD_NAME", "test"),\n Instr("POP_JUMP_IF_TRUE", label3),\n Instr("JUMP_FORWARD", label4),\n label3,\n Instr("LOAD_CONST", 1),\n Instr("STORE_NAME", "x"),\n label4,\n Instr("LOAD_CONST", None),\n Instr("RETURN_VALUE"),\n )\n\n\nif __name__ == "__main__":\n unittest.main() # pragma: no cover\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\test_peephole_opt.py
test_peephole_opt.py
Python
33,840
0.95
0.054415
0.089602
node-utils
596
2023-11-24T11:16:31.175923
Apache-2.0
true
313933275ba62e2b0716c70f43ac219e
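The peephole tests above describe each rewrite in comments (for example, replacing a jump whose target is itself an unconditional jump). As an editorial sketch of that idea only: the helper below retargets absolute jumps that chain into a JUMP_ABSOLUTE. The name retarget_jumps_to_uncond is hypothetical, it is not part of the vendored suite, and the real optimizer also handles JUMP_FORWARD chains, which this simplified sketch skips.

    from _pydevd_frame_eval.vendored.bytecode import Bytecode, Instr, Label

    ABSOLUTE_JUMPS = (
        "JUMP_ABSOLUTE",
        "POP_JUMP_IF_TRUE",
        "POP_JUMP_IF_FALSE",
        "JUMP_IF_TRUE_OR_POP",
        "JUMP_IF_FALSE_OR_POP",
    )

    def retarget_jumps_to_uncond(code: Bytecode) -> Bytecode:
        # Hypothetical helper mirroring test_uncond_jump_to_uncond_jump above,
        # restricted to absolute jumps whose target label is immediately
        # followed by a JUMP_ABSOLUTE instruction.
        first_after = {}
        pending = []
        for item in code:
            if isinstance(item, Label):
                pending.append(item)
            else:
                # Record the first real instruction placed after each label.
                for label in pending:
                    first_after[label] = item
                pending = []
        for item in code:
            if isinstance(item, Instr) and item.name in ABSOLUTE_JUMPS:
                target = first_after.get(item.arg)
                if isinstance(target, Instr) and target.name == "JUMP_ABSOLUTE":
                    # Jump straight to the final destination instead of chaining.
                    item.arg = target.arg
        return code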
from __future__ import annotations\n\nimport textwrap\nimport types\n\n\ndef get_code(source, *, filename="<string>", function=False):\n source = textwrap.dedent(source).strip()\n code = compile(source, filename, "exec")\n if function:\n sub_code = [const for const in code.co_consts if isinstance(const, types.CodeType)]\n if len(sub_code) != 1:\n raise ValueError("unable to find function code")\n code = sub_code[0]\n return code\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\util_annotation.py
util_annotation.py
Python
478
0.85
0.533333
0
react-lib
182
2024-07-19T07:41:16.704698
MIT
true
87391b7d9bf3ab052f2953e66d2d8207
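util_annotation.get_code compiles a snippet and, with function=True, returns the single nested function code object instead of the module code object. A minimal usage sketch, assuming the vendored tests package is importable (the sample sources are illustrative only):

    from _pydevd_frame_eval.vendored.bytecode.tests.util_annotation import get_code

    module_code = get_code("x = 1 + 2")
    func_code = get_code(
        """
        def f(a: int) -> int:
            return a + 1
        """,
        function=True,
    )
    print(module_code.co_filename)  # <string>
    print(func_code.co_name)        # f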
import sys\nimport textwrap\nimport types\nimport unittest\n\nfrom _pydevd_frame_eval.vendored.bytecode import (\n UNSET,\n Label,\n Instr,\n ConcreteInstr,\n BasicBlock, # noqa\n Bytecode,\n ControlFlowGraph,\n ConcreteBytecode,\n)\n\n\ndef _format_instr_list(block, labels, lineno):\n instr_list = []\n for instr in block:\n if not isinstance(instr, Label):\n if isinstance(instr, ConcreteInstr):\n cls_name = "ConcreteInstr"\n else:\n cls_name = "Instr"\n arg = instr.arg\n if arg is not UNSET:\n if isinstance(arg, Label):\n arg = labels[arg]\n elif isinstance(arg, BasicBlock):\n arg = labels[id(arg)]\n else:\n arg = repr(arg)\n if lineno:\n text = "%s(%r, %s, lineno=%s)" % (\n cls_name,\n instr.name,\n arg,\n instr.lineno,\n )\n else:\n text = "%s(%r, %s)" % (cls_name, instr.name, arg)\n else:\n if lineno:\n text = "%s(%r, lineno=%s)" % (cls_name, instr.name, instr.lineno)\n else:\n text = "%s(%r)" % (cls_name, instr.name)\n else:\n text = labels[instr]\n instr_list.append(text)\n return "[%s]" % ",\n ".join(instr_list)\n\n\ndef dump_bytecode(code, lineno=False):\n """\n Use this function to write unit tests: copy/paste its output to\n write a self.assertBlocksEqual() check.\n """\n print()\n\n if isinstance(code, (Bytecode, ConcreteBytecode)):\n is_concrete = isinstance(code, ConcreteBytecode)\n if is_concrete:\n block = list(code)\n else:\n block = code\n\n indent = " " * 8\n labels = {}\n for index, instr in enumerate(block):\n if isinstance(instr, Label):\n name = "label_instr%s" % index\n labels[instr] = name\n\n if is_concrete:\n name = "ConcreteBytecode"\n print(indent + "code = %s()" % name)\n if code.argcount:\n print(indent + "code.argcount = %s" % code.argcount)\n if sys.version_info > (3, 8):\n if code.posonlyargcount:\n print(indent + "code.posonlyargcount = %s" % code.posonlyargcount)\n if code.kwonlyargcount:\n print(indent + "code.kwargonlycount = %s" % code.kwonlyargcount)\n print(indent + "code.flags = %#x" % code.flags)\n if code.consts:\n print(indent + "code.consts = %r" % code.consts)\n if code.names:\n print(indent + "code.names = %r" % code.names)\n if code.varnames:\n print(indent + "code.varnames = %r" % code.varnames)\n\n for name in sorted(labels.values()):\n print(indent + "%s = Label()" % name)\n\n if is_concrete:\n text = indent + "code.extend("\n indent = " " * len(text)\n else:\n text = indent + "code = Bytecode("\n indent = " " * len(text)\n\n lines = _format_instr_list(code, labels, lineno).splitlines()\n last_line = len(lines) - 1\n for index, line in enumerate(lines):\n if index == 0:\n print(text + lines[0])\n elif index == last_line:\n print(indent + line + ")")\n else:\n print(indent + line)\n\n print()\n else:\n assert isinstance(code, ControlFlowGraph)\n labels = {}\n for block_index, block in enumerate(code):\n labels[id(block)] = "code[%s]" % block_index\n\n for block_index, block in enumerate(code):\n text = _format_instr_list(block, labels, lineno)\n if block_index != len(code) - 1:\n text += ","\n print(text)\n print()\n\n\ndef get_code(source, *, filename="<string>", function=False):\n source = textwrap.dedent(source).strip()\n code = compile(source, filename, "exec")\n if function:\n sub_code = [const for const in code.co_consts if isinstance(const, types.CodeType)]\n if len(sub_code) != 1:\n raise ValueError("unable to find function code")\n code = sub_code[0]\n return code\n\n\ndef disassemble(source, *, filename="<string>", function=False):\n code = get_code(source, filename=filename, function=function)\n return 
Bytecode.from_code(code)\n\n\nclass TestCase(unittest.TestCase):\n def assertBlocksEqual(self, code, *expected_blocks):\n self.assertEqual(len(code), len(expected_blocks))\n\n for block1, block2 in zip(code, expected_blocks):\n block_index = code.get_block_index(block1)\n self.assertListEqual(list(block1), block2, "Block #%s is different" % block_index)\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__init__.py
__init__.py
Python
5,094
0.95
0.293333
0
vue-tools
18
2025-03-02T10:11:28.519117
Apache-2.0
true
8d5c3053a0c7c16ab2abee5761319f34
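The dump_bytecode docstring above spells out the intended workflow: disassemble a snippet, print it, and paste the output into an assertBlocksEqual (or similar) check. A short sketch of that workflow, assuming the vendored tests package is importable; the sample expression is illustrative:

    from _pydevd_frame_eval.vendored.bytecode import ControlFlowGraph
    from _pydevd_frame_eval.vendored.bytecode.tests import disassemble, dump_bytecode

    bytecode = disassemble("x = 1 if test else 2")
    dump_bytecode(bytecode)   # prints a copy/paste-ready "code = Bytecode([...])" snippet

    cfg = ControlFlowGraph.from_bytecode(bytecode)
    dump_bytecode(cfg)        # prints one instruction list per basic block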
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_bytecode.cpython-313.pyc
test_bytecode.cpython-313.pyc
Other
21,048
0.95
0.013605
0.032609
vue-tools
95
2024-01-26T19:07:55.711250
GPL-3.0
true
9e1eb80bdbd04c9a03cda07595072e8e
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_cfg.cpython-313.pyc
test_cfg.cpython-313.pyc
Other
38,548
0.95
0.006536
0.064748
awesome-app
782
2024-12-18T19:21:04.716377
Apache-2.0
true
d63e8be59553d1773f74f0ab8d4cd819
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_code.cpython-313.pyc
test_code.cpython-313.pyc
Other
3,760
0.95
0.132353
0
awesome-app
923
2024-05-27T04:58:56.257749
MIT
true
9c4c7ae1bed820fed46138efd5df5f65
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_concrete.cpython-313.pyc
test_concrete.cpython-313.pyc
Other
54,726
0.95
0.007742
0.055046
react-lib
619
2024-07-12T05:16:08.645284
BSD-3-Clause
true
2975ae2d745b190512750441d22036c0
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_flags.cpython-313.pyc
test_flags.cpython-313.pyc
Other
8,773
0.8
0
0.0875
vue-tools
623
2024-02-04T11:01:58.232727
Apache-2.0
true
2c390b3a58986f42f123055762b33b4e
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_instr.cpython-313.pyc
test_instr.cpython-313.pyc
Other
17,394
0.95
0
0.036458
node-utils
503
2024-04-16T10:17:30.248872
Apache-2.0
true
3de68e31e941beed79ea9d9ea0e631b1
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_misc.cpython-313.pyc
test_misc.cpython-313.pyc
Other
9,512
0.95
0.013761
0
node-utils
97
2025-03-25T20:48:53.396018
BSD-3-Clause
true
a441164530914644788e9327e41f5f36
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\test_peephole_opt.cpython-313.pyc
test_peephole_opt.cpython-313.pyc
Other
30,267
0.8
0
0
vue-tools
102
2024-01-12T00:10:20.535372
BSD-3-Clause
true
a725d7ca3cbcbe6fbcb38b1a18702c17
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\util_annotation.cpython-313.pyc
util_annotation.cpython-313.pyc
Other
1,095
0.95
0.090909
0.2
vue-tools
368
2024-04-05T09:02:28.481781
MIT
true
2e0f8219adeadd093e9efc281c7b2af7
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\tests\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
6,730
0.95
0.037037
0.039216
awesome-app
2
2024-05-08T11:08:21.409849
BSD-3-Clause
true
aaccfd3658fda727dc617229092fe90f
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\bytecode.cpython-313.pyc
bytecode.cpython-313.pyc
Other
11,053
0.95
0.013333
0
node-utils
61
2024-04-14T02:04:39.210916
GPL-3.0
false
fa3eaf0ffa2d1c02d5aa432433acd1e0
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\cfg.cpython-313.pyc
cfg.cpython-313.pyc
Other
17,832
0.95
0.052288
0
awesome-app
966
2023-12-14T21:11:10.935097
GPL-3.0
false
b37e29f970abc641de96f4de935d5f20
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\concrete.cpython-313.pyc
concrete.cpython-313.pyc
Other
27,906
0.95
0
0
awesome-app
6
2024-09-05T11:10:20.349313
GPL-3.0
false
b8a16f775f4562723758836c70157a65
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\flags.cpython-313.pyc
flags.cpython-313.pyc
Other
5,397
0.95
0.112903
0
node-utils
940
2025-01-13T12:02:02.415130
MIT
false
999b99ec0350070fb64b9d0cb38996e6
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\instr.cpython-313.pyc
instr.cpython-313.pyc
Other
16,970
0.95
0
0.02439
node-utils
124
2025-03-30T08:24:16.830773
Apache-2.0
false
fefe2c5107c1a4e09fcf62d00e1c0006
\n\n
.venv\Lib\site-packages\debugpy\_vendored\pydevd\_pydevd_frame_eval\vendored\bytecode\__pycache__\peephole_opt.cpython-313.pyc
peephole_opt.cpython-313.pyc
Other
21,869
0.8
0.013245
0.022222
python-kit
966
2023-12-25T11:07:30.529437
BSD-3-Clause
false
911945dec40a2f4bbafb4991c7c30386